diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 0000000000..79e62b6891
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,50 @@
+name: ci
+
+on:
+ pull_request:
+ branches:
+ - master
+ - release-*
+
+jobs:
+
+ pull:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ job:
+ - verify
+ - build
+ - test
+ - e2e-examples
+ steps:
+ - name: Set up Go 1.13
+ uses: actions/setup-go@v1
+ with:
+ go-version: 1.13
+ id: go
+
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v2
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ path: go/src/github.com/${{ github.repository }}
+
+ - name: ${{ matrix.job }}
+ run: |
+ # workaround for https://github.com/actions/setup-go/issues/14
+ export GOPATH=${GITHUB_WORKSPACE}/go
+ export PATH=$PATH:$GOPATH/bin
+ if [[ "$job" == "verify" ]]; then
+ make check-setup check
+ elif [[ "$job" == "build" ]]; then
+ make docker e2e-docker cli debug-build-docker
+ elif [[ "$job" == "test" ]]; then
+ make test GOFLAGS=-race
+ else
+ make $job
+ fi
+ working-directory: ${{ github.workspace }}/go/src/github.com/${{ github.repository }}
+ env:
+ job: ${{ matrix.job }}
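For local reproduction, a rough sketch of what the matrix entries run; the GOPATH layout mirrors the workflow's setup-go workaround, and the paths are assumptions rather than anything mandated by the Makefile:

    # approximate local equivalent of the "verify" and "test" matrix entries
    export GOPATH=$HOME/go
    export PATH=$PATH:$GOPATH/bin
    cd $GOPATH/src/github.com/pingcap/tidb-operator
    make check-setup check     # verify
    make test GOFLAGS=-race    # test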
diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml
new file mode 100644
index 0000000000..d51da31113
--- /dev/null
+++ b/.github/workflows/stale.yaml
@@ -0,0 +1,20 @@
+name: "Close stale issues/prs"
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v1.1.0
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ stale-issue-message: 'This issue is stale because it has been open for 60 days with no activity. Remove the stale label or add a comment, or it will be closed in 15 days'
+ stale-pr-message: 'This PR is stale because it has been open for 60 days with no activity. Remove the stale label or add a comment, or it will be closed in 15 days'
+ days-before-stale: 60
+ days-before-close: 15
+ stale-issue-label: 'lifecycle/stale'
+ stale-pr-label: 'lifecycle/stale'
+ exempt-issue-label: 'lifecycle/frozen'
+ exempt-pr-label: 'lifecycle/frozen'
diff --git a/.gitignore b/.gitignore
index 4d93436715..9976dd0eef 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,5 +37,7 @@ kubeconfig
# local output directory
/output/
-# local artifacts directory
-/artifacts/
+# kubetest2 default artifacts directory
+/_artifacts/
+
+.DS_Store
diff --git a/CHANGELOG-1.1.md b/CHANGELOG-1.1.md
index 1166452ee5..75203c9d48 100644
--- a/CHANGELOG-1.1.md
+++ b/CHANGELOG-1.1.md
@@ -1,10 +1,121 @@
+# TiDB Operator v1.1.0-rc.2 Release Notes
+
+This is the second release candidate of `v1.1.0`, which focuses on the usability, extensibility and security of TiDB Operator. While we encourage usage in non-critical environments, it is **NOT** recommended to use this version in critical environments.
+
+## Notable Changes
+
+- Add `status` field for `TidbAutoScaler` CR ([#2182](https://github.com/pingcap/tidb-operator/pull/2182), [@Yisaer](https://github.com/Yisaer))
+- Add `spec.pd.maxFailoverCount` field to limit max failover replicas for PD ([#2184](https://github.com/pingcap/tidb-operator/pull/2184), [@cofyc](https://github.com/cofyc))
+- Emit more events for `TidbCluster` and `TidbClusterAutoScaler` to help users know TiDB running status ([#2150](https://github.com/pingcap/tidb-operator/pull/2150), [@Yisaer](https://github.com/Yisaer))
+- Add the `AGE` column to show creation timestamp for all CRDs ([#2168](https://github.com/pingcap/tidb-operator/pull/2168), [@cofyc](https://github.com/cofyc))
+- Add a switch to skip PD Dashboard TLS configuration ([#2143](https://github.com/pingcap/tidb-operator/pull/2143), [@weekface](https://github.com/weekface))
+- Change the TiDB pod `readiness` probe from `HTTPGet` to `TCPSocket` on port 4000 ([#2139](https://github.com/pingcap/tidb-operator/pull/2139), [@weekface](https://github.com/weekface))
+- Support deploying TiFlash with TidbCluster CR ([#2157](https://github.com/pingcap/tidb-operator/pull/2157), [@DanielZhangQD](https://github.com/DanielZhangQD))
+- Add TLS support for TiKV metrics API ([#2137](https://github.com/pingcap/tidb-operator/pull/2137), [@weekface](https://github.com/weekface))
+- Set PD DashboardConfig when TLS between the MySQL client and TiDB server is enabled ([#2085](https://github.com/pingcap/tidb-operator/pull/2085), [@weekface](https://github.com/weekface))
+- Remove unnecessary informer caches to reduce the memory footprint of tidb-controller-manager ([#1504](https://github.com/pingcap/tidb-operator/pull/1504), [@aylei](https://github.com/aylei))
+- Fix the failure that Helm cannot load the kubeconfig file when deleting the tidb-operator release during `terraform destroy` ([#2148](https://github.com/pingcap/tidb-operator/pull/2148), [@DanielZhangQD](https://github.com/DanielZhangQD))
+- Support configuring the Webhook TLS setting by loading a secret ([#2135](https://github.com/pingcap/tidb-operator/pull/2135), [@Yisaer](https://github.com/Yisaer))
+- Support TiFlash in TidbCluster CR ([#2122](https://github.com/pingcap/tidb-operator/pull/2122), [@DanielZhangQD](https://github.com/DanielZhangQD))
+- Fix the error that alertmanager couldn't be set in `TidbMonitor` ([#2108](https://github.com/pingcap/tidb-operator/pull/2108), [@Yisaer](https://github.com/Yisaer))
+
+
+# TiDB Operator v1.1.0-rc.1 Release Notes
+
+This is a release candidate of `v1.1.0`, which focuses on the usability, extensibility and security of TiDB Operator. While we encourage usage in non-critical environments, it is **NOT** recommended to use this version in critical environments.
+
+## Action Required
+
+- `--advertise-address` will be configured for `tidb-server`, which will trigger a rolling upgrade of the `tidb-server` component. You can set `spec.paused` to `true` before upgrading tidb-operator to avoid the rolling upgrade, and set it back to `false` when you are ready to upgrade your TiDB server (see the sketch after this list) ([#2076](https://github.com/pingcap/tidb-operator/pull/2076), [@cofyc](https://github.com/cofyc))
+- Add the `tlsClient.tlsSecret` field in the backup and restore spec, which supports specifying a secret name that includes the cert ([#2003](https://github.com/pingcap/tidb-operator/pull/2003), [@shuijing198799](https://github.com/shuijing198799))
+
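For the first item above, a minimal sketch of pausing and resuming syncing around the operator upgrade (the cluster name `basic` and namespace `tidb` are placeholders):

    # pause reconciliation before upgrading tidb-operator
    kubectl patch tidbcluster basic -n tidb --type merge -p '{"spec":{"paused":true}}'
    # ...upgrade tidb-operator...
    # resume reconciliation when ready for the tidb-server rolling upgrade
    kubectl patch tidbcluster basic -n tidb --type merge -p '{"spec":{"paused":false}}'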
+
+## Other Notable Changes
+
+- Use `tidb-lightning` in `Restore` instead of `loader` ([#2068](https://github.com/pingcap/tidb-operator/pull/2068), [@Yisaer](https://github.com/Yisaer))
+- Add `cert-allowed-cn` support to TiDB components ([#2061](https://github.com/pingcap/tidb-operator/pull/2061), [@weekface](https://github.com/weekface))
+- Fix the PD `location-labels` configuration ([#1941](https://github.com/pingcap/tidb-operator/pull/1941), [@aylei](https://github.com/aylei))
+- Able to pause and unpause tidb cluster deployment via `spec.paused` ([#2013](https://github.com/pingcap/tidb-operator/pull/2013), [@cofyc](https://github.com/cofyc))
+- Default the `max-backups` for TiDB server configuration to `3` if the TiDB cluster is deployed by CR ([#2045](https://github.com/pingcap/tidb-operator/pull/2045), [@Yisaer](https://github.com/Yisaer))
+- Able to configure custom environments for components ([#2052](https://github.com/pingcap/tidb-operator/pull/2052), [@cofyc](https://github.com/cofyc))
+- Fix the error that `kubectl get tc` cannot show correct images ([#2031](https://github.com/pingcap/tidb-operator/pull/2031), [@Yisaer](https://github.com/Yisaer))
+- 1. Default the `spec.tikv.maxFailoverCount` and `spec.tidb.maxFailoverCount` to `3` when they are not defined
+ 2. Disable auto-failover when `maxFailoverCount` is set to `0` ([#2015](https://github.com/pingcap/tidb-operator/pull/2015), [@Yisaer](https://github.com/Yisaer))
+- Support deploying TiDB clusters with TidbCluster and TidbMonitor CRs via Terraform on ACK ([#2012](https://github.com/pingcap/tidb-operator/pull/2012), [@DanielZhangQD](https://github.com/DanielZhangQD))
+- Update PDConfig for TidbCluster to PD v3.1.0 ([#1928](https://github.com/pingcap/tidb-operator/pull/1928), [@Yisaer](https://github.com/Yisaer))
+- Support deploying TiDB clusters with TidbCluster and TidbMonitor CRs via Terraform on AWS ([#2004](https://github.com/pingcap/tidb-operator/pull/2004), [@DanielZhangQD](https://github.com/DanielZhangQD))
+- Update TidbConfig for TidbCluster to TiDB v3.1.0 ([#1906](https://github.com/pingcap/tidb-operator/pull/1906), [@Yisaer](https://github.com/Yisaer))
+- Allow users to define resources for initContainers in TiDB initializer job ([#1938](https://github.com/pingcap/tidb-operator/pull/1938), [@tfulcrand](https://github.com/tfulcrand))
+- Add TLS support for Pump and Drainer ([#1979](https://github.com/pingcap/tidb-operator/pull/1979), [@weekface](https://github.com/weekface))
+- Add documents and examples for auto-scaler and initializer ([#1772](https://github.com/pingcap/tidb-operator/pull/1772), [@Yisaer](https://github.com/Yisaer))
+- 1. Add check to guarantee the NodePort won't be changed if the serviceType of TidbMonitor is NodePort
+ 2. Add EnvVar sort to avoid the monitor rendering different results from the same TidbMonitor spec
+ 3. Fix the problem that the TidbMonitor LoadBalancer IP is not used ([#1962](https://github.com/pingcap/tidb-operator/pull/1962), [@Yisaer](https://github.com/Yisaer))
+- Make tidb-initializer support TLS ([#1931](https://github.com/pingcap/tidb-operator/pull/1931), [@weekface](https://github.com/weekface))
+- 1. Fix the problem that Advanced StatefulSet cannot work with webhook
+ 2. Change the webhook's reaction to a delete request for a TiKV Pod in the Down state from admit to reject ([#1963](https://github.com/pingcap/tidb-operator/pull/1963), [@Yisaer](https://github.com/Yisaer))
+- Fix the drainer installation error when `drainerName` is set ([#1961](https://github.com/pingcap/tidb-operator/pull/1961), [@DanielZhangQD](https://github.com/DanielZhangQD))
+- Fix some TiKV configuration keys in toml ([#1887](https://github.com/pingcap/tidb-operator/pull/1887), [@aylei](https://github.com/aylei))
+- Support using a remote directory as data source for tidb-lightning ([#1629](https://github.com/pingcap/tidb-operator/pull/1629), [@aylei](https://github.com/aylei))
+- Add the API document and a script that generates documentation ([#1945](https://github.com/pingcap/tidb-operator/pull/1945), [@Yisaer](https://github.com/Yisaer))
+- Add the tikv-importer chart ([#1910](https://github.com/pingcap/tidb-operator/pull/1910), [@shonge](https://github.com/shonge))
+- Fix the Prometheus scrape config issue while TLS is enabled ([#1919](https://github.com/pingcap/tidb-operator/pull/1919), [@weekface](https://github.com/weekface))
+- Enable TLS between TiDB components ([#1870](https://github.com/pingcap/tidb-operator/pull/1870), [@weekface](https://github.com/weekface))
+- Fix the timeout error when `.Values.admission.validation.pods` is `true` during the TiKV upgrade ([#1875](https://github.com/pingcap/tidb-operator/pull/1875), [@Yisaer](https://github.com/Yisaer))
+- Enable TLS for MySQL clients ([#1878](https://github.com/pingcap/tidb-operator/pull/1878), [@weekface](https://github.com/weekface))
+- Fix the bug which would cause broken TiDB image property ([#1860](https://github.com/pingcap/tidb-operator/pull/1860), [@Yisaer](https://github.com/Yisaer))
+- TidbMonitor would use its namespace for the targetRef if it is not defined ([#1834](https://github.com/pingcap/tidb-operator/pull/1834), [@Yisaer](https://github.com/Yisaer))
+- Support starting tidb-server with `--advertise-address` parameter ([#1859](https://github.com/pingcap/tidb-operator/pull/1859), [@LinuxGit](https://github.com/LinuxGit))
+- Backup/Restore: support configuring TiKV GC life time ([#1835](https://github.com/pingcap/tidb-operator/pull/1835), [@LinuxGit](https://github.com/LinuxGit))
+- Support omitting the secret for S3/Ceph when OIDC authentication is used ([#1817](https://github.com/pingcap/tidb-operator/pull/1817), [@tirsen](https://github.com/tirsen))
+- 1. Change the setting from the previous `admission.hookEnabled.pods` to the `admission.validation.pods`
+ 2. Change the setting from the previous `admission.hookEnabled.statefulSets` to the `admission.validation.statefulSets`
+ 3. Change the setting from the previous `admission.hookEnabled.validating` to the `admission.validation.pingcapResources`
+ 4. Change the setting from the previous `admission.hookEnabled.defaulting` to the `admission.mutation.pingcapResources`
+ 5. Change the setting from the previous `admission.failurePolicy.defaulting` to the `admission.failurePolicy.mutation`
+ 6. Change the setting from the previous `admission.failurePolicy.*` to the `admission.failurePolicy.validation` ([#1832](https://github.com/pingcap/tidb-operator/pull/1832), [@Yisaer](https://github.com/Yisaer))
+- Enable TidbCluster defaulting mutation by default which is recommended when admission webhook is used ([#1816](https://github.com/pingcap/tidb-operator/pull/1816), [@Yisaer](https://github.com/Yisaer))
+- Fix a bug that TiKV fails to start while creating the cluster using CR with cluster TLS enabled ([#1808](https://github.com/pingcap/tidb-operator/pull/1808), [@weekface](https://github.com/weekface))
+- Support using prefix in remote storage during backup/restore ([#1790](https://github.com/pingcap/tidb-operator/pull/1790), [@DanielZhangQD](https://github.com/DanielZhangQD))
+
+
+# TiDB Operator v1.1.0-beta.2 Release Notes
+
+This is a pre-release of `v1.1.0`, which focuses on the usability, extensibility and security of TiDB Operator. While we encourage usage in non-critical environments, it is **NOT** recommended to use this version in critical environments.
+
+## Changes since v1.1.0-beta.1
+
+## Action Required
+
+- `--default-storage-class-name` and `--default-backup-storage-class-name` are abandoned, and the storage class now defaults to the Kubernetes default storage class. If you have set a default storage class different from the Kubernetes default storage class, please set it explicitly in your TiDB cluster Helm or YAML files. ([#1581](https://github.com/pingcap/tidb-operator/pull/1581), [@cofyc](https://github.com/cofyc))
+
+
+## Other Notable Changes
+
+- Allow users to configure affinity and tolerations for `Backup` and `Restore`. ([#1737](https://github.com/pingcap/tidb-operator/pull/1737), [@Smana](https://github.com/Smana))
+- Allow AdvancedStatefulSet and Admission Webhook to work together. ([#1640](https://github.com/pingcap/tidb-operator/pull/1640), [@Yisaer](https://github.com/Yisaer))
+- Add a basic deployment example of managing TiDB cluster with custom resources only. ([#1573](https://github.com/pingcap/tidb-operator/pull/1573), [@aylei](https://github.com/aylei))
+- Support the TidbCluster auto-scaling feature based on average CPU utilization ([#1731](https://github.com/pingcap/tidb-operator/pull/1731), [@Yisaer](https://github.com/Yisaer))
+- Support user-defined TiDB server/client certificate ([#1714](https://github.com/pingcap/tidb-operator/pull/1714), [@weekface](https://github.com/weekface))
+- Add an option for tidb-backup chart to allow reusing existing PVC or not for restore ([#1708](https://github.com/pingcap/tidb-operator/pull/1708), [@mightyguava](https://github.com/mightyguava))
+- Add `resources`, `imagePullPolicy` and `nodeSelector` field for tidb-backup chart ([#1705](https://github.com/pingcap/tidb-operator/pull/1705), [@mightyguava](https://github.com/mightyguava))
+- Add more SANs (Subject Alternative Name) to TiDB server certificate ([#1702](https://github.com/pingcap/tidb-operator/pull/1702), [@weekface](https://github.com/weekface))
+- Support automatically migrating existing Kubernetes StatefulSets to Advanced StatefulSets when the AdvancedStatefulSet feature is enabled ([#1580](https://github.com/pingcap/tidb-operator/pull/1580), [@cofyc](https://github.com/cofyc))
+- Fix the bug in the admission webhook which causes a PD pod deletion error, and allow the pod deletion request for PD and TiKV when the PVC is not found. ([#1568](https://github.com/pingcap/tidb-operator/pull/1568), [@Yisaer](https://github.com/Yisaer))
+- Limit the restart rate for PD and TiKV - only one instance would be restarted each time ([#1532](https://github.com/pingcap/tidb-operator/pull/1532), [@Yisaer](https://github.com/Yisaer))
+- Default the ClusterRef namespace for TidbMonitor to the namespace it is deployed in, and fix the bug that TidbMonitor's Pod can't be created when Spec.PrometheusSpec.logLevel is missing. ([#1500](https://github.com/pingcap/tidb-operator/pull/1500), [@Yisaer](https://github.com/Yisaer))
+- Refine logs for `TidbMonitor` and `TidbInitializer` controller ([#1493](https://github.com/pingcap/tidb-operator/pull/1493), [@aylei](https://github.com/aylei))
+- Avoid unnecessary updates to `Service` and `Deployment` of discovery ([#1499](https://github.com/pingcap/tidb-operator/pull/1499), [@aylei](https://github.com/aylei))
+- Remove some update events that are not very useful ([#1486](https://github.com/pingcap/tidb-operator/pull/1486), [@weekface](https://github.com/weekface))
+
+
# TiDB Operator v1.1.0-beta.1 Release Notes
This is a pre-release of `v1.1.0`, which focuses on the usability, extensibility and security of TiDB Operator. While we encourage usage in non-critical environments, it is **NOT** recommended to use this version in critical environments.
## Changes since v1.0.0
-### Action required
+### Action Required
- ACTION REQUIRED: Add the `timezone` support for [all charts](https://github.com/pingcap/tidb-operator/tree/master/charts) ([#1122](https://github.com/pingcap/tidb-operator/pull/1122), [@weekface](https://github.com/weekface)).
@@ -17,7 +128,7 @@ This is a pre-release of `v1.1.0`, which focuses on the usability, extensibility
All images' time zone maintained by `tidb-operator` is `UTC`. If you use your own images, you need to make sure that the time zone inside your images is `UTC`.
-### Other notable changes
+### Other Notable Changes
- Support backup to S3 with [Backup & Restore (BR)](https://github.com/pingcap/br) ([#1280](https://github.com/pingcap/tidb-operator/pull/1280), [@DanielZhangQD](https://github.com/DanielZhangQD))
- Add basic defaulting and validating for `TidbCluster` ([#1429](https://github.com/pingcap/tidb-operator/pull/1429), [@aylei](https://github.com/aylei))
diff --git a/Makefile b/Makefile
index 2369a5a08e..9ac4af82f0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,6 @@
# Set DEBUGGER=1 to build debug symbols
LDFLAGS = $(if $(DEBUGGER),,-s -w) $(shell ./hack/version.sh)
-# SET DOCKER_REGISTRY to change the docker registry
-DOCKER_REGISTRY := $(if $(DOCKER_REGISTRY),$(DOCKER_REGISTRY),localhost:5000)
-
GOVER_MAJOR := $(shell go version | sed -E -e "s/.*go([0-9]+)[.]([0-9]+).*/\1/")
GOVER_MINOR := $(shell go version | sed -E -e "s/.*go([0-9]+)[.]([0-9]+).*/\2/")
GO113 := $(shell [ $(GOVER_MAJOR) -gt 1 ] || [ $(GOVER_MAJOR) -eq 1 ] && [ $(GOVER_MINOR) -ge 13 ]; echo $$?)
@@ -19,6 +16,8 @@ GOENV := GO15VENDOREXPERIMENT="1" CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH)
GO := $(GOENV) go
GO_BUILD := $(GO) build -trimpath
+DOCKER_REGISTRY ?= localhost:5000
+DOCKER_REPO ?= ${DOCKER_REGISTRY}/pingcap
IMAGE_TAG ?= latest
PACKAGE_LIST := go list ./... | grep -vE "client/(clientset|informers|listers)"
PACKAGE_DIRECTORIES := $(PACKAGE_LIST) | sed 's|github.com/pingcap/tidb-operator/||'
@@ -29,8 +28,8 @@ TEST_COVER_PACKAGES:=go list ./pkg/... | grep -vE "pkg/client" | grep -vE "pkg/t
default: build
docker-push: docker backup-docker
- docker push "${DOCKER_REGISTRY}/pingcap/tidb-operator:${IMAGE_TAG}"
- docker push "${DOCKER_REGISTRY}/pingcap/tidb-backup-manager:${IMAGE_TAG}"
+ docker push "${DOCKER_REPO}/tidb-operator:${IMAGE_TAG}"
+ docker push "${DOCKER_REPO}/tidb-backup-manager:${IMAGE_TAG}"
ifeq ($(NO_BUILD),y)
docker:
@@ -38,7 +37,8 @@ docker:
else
docker: build
endif
- docker build --tag "${DOCKER_REGISTRY}/pingcap/tidb-operator:${IMAGE_TAG}" images/tidb-operator
+ docker build --tag "${DOCKER_REPO}/tidb-operator:${IMAGE_TAG}" images/tidb-operator
+ docker build --tag "${DOCKER_REPO}/tidb-backup-manager:${IMAGE_TAG}" images/tidb-backup-manager
build: controller-manager scheduler discovery admission-webhook apiserver backup-manager
@@ -58,7 +58,7 @@ apiserver:
$(GO_BUILD) -ldflags '$(LDFLAGS)' -o images/tidb-operator/bin/tidb-apiserver cmd/apiserver/main.go
backup-manager:
- $(GO_BUILD) -ldflags '$(LDFLAGS)' -o images/backup-manager/bin/tidb-backup-manager cmd/backup-manager/main.go
+ $(GO_BUILD) -ldflags '$(LDFLAGS)' -o images/tidb-backup-manager/bin/tidb-backup-manager cmd/backup-manager/main.go
ifeq ($(NO_BUILD),y)
backup-docker:
@@ -66,10 +66,10 @@ backup-docker:
else
backup-docker: backup-manager
endif
- docker build --tag "${DOCKER_REGISTRY}/pingcap/tidb-backup-manager:${IMAGE_TAG}" images/backup-manager
+ docker build --tag "${DOCKER_REPO}/tidb-backup-manager:${IMAGE_TAG}" images/tidb-backup-manager
e2e-docker-push: e2e-docker
- docker push "${DOCKER_REGISTRY}/pingcap/tidb-operator-e2e:${IMAGE_TAG}"
+ docker push "${DOCKER_REPO}/tidb-operator-e2e:${IMAGE_TAG}"
ifeq ($(NO_BUILD),y)
e2e-docker:
@@ -85,7 +85,7 @@ endif
cp -r charts/tidb-cluster tests/images/e2e
cp -r charts/tidb-backup tests/images/e2e
cp -r manifests tests/images/e2e
- docker build -t "${DOCKER_REGISTRY}/pingcap/tidb-operator-e2e:${IMAGE_TAG}" tests/images/e2e
+ docker build -t "${DOCKER_REPO}/tidb-operator-e2e:${IMAGE_TAG}" tests/images/e2e
e2e-build:
$(GO_BUILD) -ldflags '$(LDFLAGS)' -o tests/images/e2e/bin/ginkgo github.com/onsi/ginkgo/ginkgo
@@ -97,15 +97,18 @@ e2e-build:
e2e:
./hack/e2e.sh
+e2e-examples:
+ ./hack/e2e-examples.sh
+
stability-test-build:
$(GO_BUILD) -ldflags '$(LDFLAGS)' -o tests/images/stability-test/bin/blockwriter ./tests/cmd/blockwriter
$(GO_BUILD) -ldflags '$(LDFLAGS)' -o tests/images/stability-test/bin/stability-test ./tests/cmd/stability
stability-test-docker: stability-test-build
- docker build -t "${DOCKER_REGISTRY}/pingcap/tidb-operator-stability-test:${IMAGE_TAG}" tests/images/stability-test
+ docker build -t "${DOCKER_REPO}/tidb-operator-stability-test:${IMAGE_TAG}" tests/images/stability-test
stability-test-push: stability-test-docker
- docker push "${DOCKER_REGISTRY}/pingcap/tidb-operator-stability-test:${IMAGE_TAG}"
+ docker push "${DOCKER_REPO}/tidb-operator-stability-test:${IMAGE_TAG}"
fault-trigger:
$(GO_BUILD) -ldflags '$(LDFLAGS)' -o tests/images/fault-trigger/bin/fault-trigger tests/cmd/fault-trigger/*.go
@@ -202,16 +205,16 @@ cli:
$(GO_BUILD) -ldflags '$(LDFLAGS)' -o tkctl cmd/tkctl/main.go
debug-docker-push: debug-build-docker
- docker push "${DOCKER_REGISTRY}/pingcap/debug-launcher:latest"
- docker push "${DOCKER_REGISTRY}/pingcap/tidb-control:latest"
- docker push "${DOCKER_REGISTRY}/pingcap/tidb-debug:latest"
+ docker push "${DOCKER_REPO}/debug-launcher:latest"
+ docker push "${DOCKER_REPO}/tidb-control:latest"
+ docker push "${DOCKER_REPO}/tidb-debug:latest"
debug-build-docker: debug-build
- docker build -t "${DOCKER_REGISTRY}/pingcap/debug-launcher:latest" misc/images/debug-launcher
- docker build -t "${DOCKER_REGISTRY}/pingcap/tidb-control:latest" misc/images/tidb-control
- docker build -t "${DOCKER_REGISTRY}/pingcap/tidb-debug:latest" misc/images/tidb-debug
+ docker build -t "${DOCKER_REPO}/debug-launcher:latest" misc/images/debug-launcher
+ docker build -t "${DOCKER_REPO}/tidb-control:latest" misc/images/tidb-control
+ docker build -t "${DOCKER_REPO}/tidb-debug:latest" misc/images/tidb-debug
debug-build:
$(GO_BUILD) -ldflags '$(LDFLAGS)' -o misc/images/debug-launcher/bin/debug-launcher misc/cmd/debug-launcher/main.go
-.PHONY: check check-setup check-all build e2e-build debug-build cli e2e
+.PHONY: check check-setup check-all build e2e-build debug-build cli e2e test docker e2e-docker debug-build-docker
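As a usage note for the new `DOCKER_REPO` variable, a hedged sketch of building and pushing images to a custom repository (the registry name is a placeholder):

    # DOCKER_REPO defaults to ${DOCKER_REGISTRY}/pingcap, i.e. localhost:5000/pingcap
    make docker-push DOCKER_REPO=registry.example.com/myteam IMAGE_TAG=v1.1.0-rc.2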
diff --git a/README.md b/README.md
index 2a8fa19297..be0ebc8e0f 100644
--- a/README.md
+++ b/README.md
@@ -53,11 +53,12 @@ Read the [Roadmap](./ROADMAP.md).
Read the [Quick Start Guide](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/tidb-operator-overview/), which includes all the guides for managing TiDB clusters in Kubernetes.
-
## Documentation
-- [English](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/tidb-operator-overview/)
-- [简体中文](https://pingcap.com/docs-cn/v3.0/tidb-in-kubernetes/tidb-operator-overview/)
+All the TiDB Operator documentation is maintained in the [docs-tidb-operator repository](https://github.com/pingcap/docs-tidb-operator). You can also read the documentation on the PingCAP website:
+
+- [English](https://pingcap.com/docs/tidb-in-kubernetes/stable/tidb-operator-overview/)
+- [简体中文](https://pingcap.com/docs-cn/tidb-in-kubernetes/stable/tidb-operator-overview/)
## Contributing
diff --git a/charts/tidb-backup/templates/backup-job.yaml b/charts/tidb-backup/templates/backup-job.yaml
index 24b7346252..a6c8eb953d 100644
--- a/charts/tidb-backup/templates/backup-job.yaml
+++ b/charts/tidb-backup/templates/backup-job.yaml
@@ -33,6 +33,8 @@ spec:
{{- if .Values.serviceAccount }}
serviceAccount: {{ .Values.serviceAccount }}
{{- end }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
containers:
- name: backup
image: {{ .Values.image.backup }}
@@ -88,6 +90,14 @@ spec:
name: {{ .Values.secretName }}
key: password
restartPolicy: OnFailure
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+ {{- end }}
volumes:
- name: data
persistentVolumeClaim:
diff --git a/charts/tidb-backup/templates/backup-pvc.yaml b/charts/tidb-backup/templates/backup-pvc.yaml
index 290868f8af..5f1bfbc051 100644
--- a/charts/tidb-backup/templates/backup-pvc.yaml
+++ b/charts/tidb-backup/templates/backup-pvc.yaml
@@ -1,8 +1,12 @@
-{{- if (or (eq .Values.mode "backup") (eq .Values.mode "scheduled-restore")) }}
+{{- if (or (eq .Values.mode "backup") (eq .Values.mode "scheduled-restore") (and (eq .Values.mode "restore") (not .Values.restoreUsingExistingVolume))) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
+ {{- if eq .Values.mode "restore" }}
+ name: restore-{{ tpl .Values.name . }}
+ {{- else }}
name: {{ tpl .Values.name . }}
+ {{- end }}
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: tidb-operator
diff --git a/charts/tidb-backup/templates/restore-job.yaml b/charts/tidb-backup/templates/restore-job.yaml
index d5cd17a6ab..a4347eaac4 100644
--- a/charts/tidb-backup/templates/restore-job.yaml
+++ b/charts/tidb-backup/templates/restore-job.yaml
@@ -25,9 +25,16 @@ spec:
{{- end }}
spec:
restartPolicy: OnFailure
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
containers:
- name: tidb-restore-job
image: {{ .Values.image.backup }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | default "IfNotPresent" }}
+ {{- if .Values.resources }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ {{- end }}
command:
- /bin/sh
- -c
@@ -74,10 +81,20 @@ spec:
secretKeyRef:
name: {{ .Values.secretName }}
key: password
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+ {{- end }}
volumes:
- name: data
persistentVolumeClaim:
- {{- if .Values.scheduledBackupName }}
+ {{- if not .Values.restoreUsingExistingVolume }}
+ claimName: restore-{{ .Values.name }}
+ {{- else if .Values.scheduledBackupName }}
claimName: {{ .Values.name }}-scheduled-backup
{{- else }}
claimName: {{ .Values.name }}
diff --git a/charts/tidb-backup/templates/scripts/_start_backup.sh.tpl b/charts/tidb-backup/templates/scripts/_start_backup.sh.tpl
index f33b0d1b14..b321c6a52d 100644
--- a/charts/tidb-backup/templates/scripts/_start_backup.sh.tpl
+++ b/charts/tidb-backup/templates/scripts/_start_backup.sh.tpl
@@ -67,22 +67,46 @@ $creds
EOF
cd "${backup_base_dir}"
+{{- if .Values.gcp.prefix }}
+tar -cf - "${backup_name}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat gcp:${bucket}/{{ .Values.gcp.prefix }}/${backup_name}/${backup_name}.tgz
+{{- else }}
tar -cf - "${backup_name}" | pigz -p 16 \
| rclone --config /tmp/rclone.conf rcat gcp:${bucket}/${backup_name}/${backup_name}.tgz
{{- end }}
+{{- end }}
{{- if .Values.ceph }}
uploader \
--cloud=ceph \
+ {{- if .Values.ceph.prefix }}
+ --bucket={{ .Values.ceph.bucket }}/{{ .Values.ceph.prefix }} \
+ {{- else }}
--bucket={{ .Values.ceph.bucket }} \
+ {{- end }}
--endpoint={{ .Values.ceph.endpoint }} \
--backup-dir=${dirname}
{{- end }}
{{- if .Values.s3 }}
-uploader \
- --cloud=aws \
- --region={{ .Values.s3.region }} \
- --bucket={{ .Values.s3.bucket }} \
- --backup-dir=${dirname}
+# Once we know there are no more credentials that will be logged we can run with -x
+set -x
+bucket={{ .Values.s3.bucket }}
+
+cat <<EOF > /tmp/rclone.conf
+[s3]
+type = s3
+provider = AWS
+env_auth = true
+region = {{ .Values.s3.region }}
+EOF
+
+cd "${backup_base_dir}"
+{{- if .Values.s3.prefix }}
+tar -cf - "${backup_name}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat s3:${bucket}/{{ .Values.s3.prefix }}/${backup_name}/${backup_name}.tgz
+{{- else }}
+tar -cf - "${backup_name}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat s3:${bucket}/${backup_name}/${backup_name}.tgz
+{{- end }}
{{- end }}
diff --git a/charts/tidb-backup/templates/scripts/_start_restore.sh.tpl b/charts/tidb-backup/templates/scripts/_start_restore.sh.tpl
index 1489630a81..c15ffa146f 100644
--- a/charts/tidb-backup/templates/scripts/_start_restore.sh.tpl
+++ b/charts/tidb-backup/templates/scripts/_start_restore.sh.tpl
@@ -7,7 +7,11 @@ host=`echo {{ .Values.clusterName }}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]' | tr
{{- if .Values.gcp }}
downloader \
--cloud=gcp \
+ {{- if .Values.gcp.prefix }}
+ --bucket={{ .Values.gcp.bucket }}/{{ .Values.gcp.prefix }} \
+ {{- else }}
--bucket={{ .Values.gcp.bucket }} \
+ {{- end }}
--srcDir=${BACKUP_NAME} \
--destDir=/data
{{- end }}
@@ -15,7 +19,11 @@ downloader \
{{- if .Values.ceph }}
downloader \
--cloud=ceph \
+ {{- if .Values.ceph.prefix }}
+ --bucket={{ .Values.ceph.bucket }}/{{ .Values.ceph.prefix }} \
+ {{- else }}
--bucket={{ .Values.ceph.bucket }} \
+ {{- end }}
--endpoint={{ .Values.ceph.endpoint }} \
--srcDir=${BACKUP_NAME} \
--destDir=/data
@@ -25,7 +33,11 @@ downloader \
downloader \
--cloud=aws \
--region={{ .Values.s3.region }} \
+ {{- if .Values.s3.prefix }}
+ --bucket={{ .Values.s3.bucket }}/{{ .Values.s3.prefix }} \
+ {{- else }}
--bucket={{ .Values.s3.bucket }} \
+ {{- end }}
--srcDir=${BACKUP_NAME} \
--destDir=/data
{{- end }}
diff --git a/charts/tidb-backup/values.yaml b/charts/tidb-backup/values.yaml
index 1d6085d166..14a1044a37 100644
--- a/charts/tidb-backup/values.yaml
+++ b/charts/tidb-backup/values.yaml
@@ -27,7 +27,11 @@ name: fullbackup-{{ date "200601021504" .Release.Time }}
image:
pullPolicy: IfNotPresent
# https://github.com/pingcap/tidb-cloud-backup
- backup: pingcap/tidb-cloud-backup:20191217
+ backup: pingcap/tidb-cloud-backup:20200229
+
+## nodeSelector ensures pods are only assigned to nodes that have each of the indicated key-value pairs as labels
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+nodeSelector: {}
# Add additional labels for backup/restore job's pod
# ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
@@ -36,6 +40,15 @@ extraLabels: {}
# Add annotations for backup/restore job's pod
annotations: {}
+## affinity defines scheduling rules for the backup/restore job pods. The default setting is empty.
+## please read the affinity documentation before setting your scheduling rules:
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+affinity: {}
+
+## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
+## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
+tolerations: []
+
# secretName is the name of the secret which stores user and password used for backup/restore
# Note: you must give the user enough privilege to do the backup and restore
# you can create the secret by:
@@ -80,12 +93,18 @@ restoreOptions: "-t 16"
# When a GC happens, the current time minus this value is the safe point.
tikvGCLifeTime: 720h
+# By default, restores are performed by binding to an existing volume containing backup data.
+# To restore from gcp, ceph, or s3, set this to false to create a new volume to load the backup into.
+# This setting only affects the "restore" mode.
+restoreUsingExistingVolume: true
+
# By default, the backup/restore uses PV to store/load backup data
# You can choose to store/load backup data to/from gcp, ceph or s3 bucket by enabling the following corresponding section:
# backup to or restore from gcp bucket, the backup path is in the form of -
gcp: {}
# bucket: ""
+ # prefix: ""
# secretName is not necessary on GKE if you use the workload identity feature
# secretName is the name of the secret which stores the gcp service account credentials json file
# The service account must have read/write permission to the above bucket.
@@ -99,6 +118,7 @@ gcp: {}
ceph: {}
# endpoint: ""
# bucket: ""
+ # prefix: ""
# secretName is the name of the secret which stores ceph object store access key and secret key
# You can create the secret by:
# kubectl create secret generic ceph-backup-secret --namespace= --from-literal=access_key= --from-literal=secret_key=
@@ -108,6 +128,7 @@ ceph: {}
s3: {}
# region: ""
# bucket: ""
+ # prefix: ""
# secretName is the name of the secret which stores s3 object store access key and secret key
# This is not necessary on AWS. Instead you should be able to get the credentials from the EKS service IAM role.
# You can create the secret by:
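To illustrate the new `restoreUsingExistingVolume` and `prefix` options, a sketch of restoring from S3 into a freshly created volume (the release name, bucket, and prefix are placeholders; other required values such as `clusterName`, `name`, and `secretName` are omitted):

    helm install charts/tidb-backup --name demo-restore --namespace=tidb \
      --set mode=restore \
      --set restoreUsingExistingVolume=false \
      --set s3.region=us-west-2,s3.bucket=my-backups,s3.prefix=demo-cluster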
diff --git a/charts/tidb-cluster/templates/_helpers.tpl b/charts/tidb-cluster/templates/_helpers.tpl
index 75732a0456..766d43789b 100644
--- a/charts/tidb-cluster/templates/_helpers.tpl
+++ b/charts/tidb-cluster/templates/_helpers.tpl
@@ -28,7 +28,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- end -}}
{{- define "cluster.scheme" -}}
-{{ if .Values.enableTLSCluster }}https{{ else }}http{{ end }}
+{{ if and .Values.tlsCluster .Values.tlsCluster.enabled }}https{{ else }}http{{ end }}
{{- end -}}
{{/*
@@ -41,11 +41,11 @@ config-file: |-
{{- if .Values.pd.config }}
{{ .Values.pd.config | indent 2 }}
{{- end -}}
- {{- if .Values.enableTLSCluster }}
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
[security]
- cacert-path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- cert-path = "/var/lib/pd-tls/cert"
- key-path = "/var/lib/pd-tls/key"
+ cacert-path = "/var/lib/pd-tls/ca.crt"
+ cert-path = "/var/lib/pd-tls/tls.crt"
+ key-path = "/var/lib/pd-tls/tls.key"
{{- end -}}
{{- end -}}
@@ -64,11 +64,11 @@ config-file: |-
{{- if .Values.tikv.config }}
{{ .Values.tikv.config | indent 2 }}
{{- end -}}
- {{- if .Values.enableTLSCluster }}
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
[security]
- ca-path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- cert-path = "/var/lib/tikv-tls/cert"
- key-path = "/var/lib/tikv-tls/key"
+ ca-path = "/var/lib/tikv-tls/ca.crt"
+ cert-path = "/var/lib/tikv-tls/tls.crt"
+ key-path = "/var/lib/tikv-tls/tls.key"
{{- end -}}
{{- end -}}
@@ -91,18 +91,18 @@ config-file: |-
{{- if .Values.tidb.config }}
{{ .Values.tidb.config | indent 2 }}
{{- end -}}
- {{- if or .Values.enableTLSCluster .Values.enableTLSClient }}
+ {{- if or (and .Values.tlsCluster .Values.tlsCluster.enabled) (and .Values.tidb.tlsClient .Values.tidb.tlsClient.enabled) }}
[security]
{{- end -}}
- {{- if .Values.enableTLSCluster }}
- cluster-ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- cluster-ssl-cert = "/var/lib/tidb-tls/cert"
- cluster-ssl-key = "/var/lib/tidb-tls/key"
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ cluster-ssl-ca = "/var/lib/tidb-tls/ca.crt"
+ cluster-ssl-cert = "/var/lib/tidb-tls/tls.crt"
+ cluster-ssl-key = "/var/lib/tidb-tls/tls.key"
{{- end -}}
- {{- if .Values.tidb.enableTLSClient }}
- ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- ssl-cert = "/var/lib/tidb-server-tls/cert"
- ssl-key = "/var/lib/tidb-server-tls/key"
+ {{- if and .Values.tidb.tlsClient .Values.tidb.tlsClient.enabled }}
+ ssl-ca = "/var/lib/tidb-server-tls/ca.crt"
+ ssl-cert = "/var/lib/tidb-server-tls/tls.crt"
+ ssl-key = "/var/lib/tidb-server-tls/tls.key"
{{- end -}}
{{- end -}}
@@ -114,10 +114,20 @@ config-file: |-
{{/*
Encapsulate pump configmap data for consistent digest calculation
*/}}
+{{- define "pump.tlsSecretName" -}}
+{{ .Values.clusterName }}-pump
+{{- end -}}
+
{{- define "pump-configmap.data" -}}
pump-config: |-
{{- if .Values.binlog.pump.config }}
{{ .Values.binlog.pump.config | indent 2 }}
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ [security]
+ ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ ssl-cert = "/var/lib/pump-tls/tls.crt"
+ ssl-key = "/var/lib/pump-tls/tls.key"
+ {{- end -}}
{{- else -}}
{{ tuple "config/_pump-config.tpl" . | include "helm-toolkit.utils.template" | indent 2 }}
{{- end -}}
diff --git a/charts/tidb-cluster/templates/config/_prometheus-config.tpl b/charts/tidb-cluster/templates/config/_prometheus-config.tpl
index f73573fa74..12bdc79bee 100644
--- a/charts/tidb-cluster/templates/config/_prometheus-config.tpl
+++ b/charts/tidb-cluster/templates/config/_prometheus-config.tpl
@@ -19,13 +19,17 @@ scrape_configs:
names:
- {{ .Release.Namespace }}
{{- end }}
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ scheme: https
+ tls_config:
+ insecure_skip_verify: false
+ ca_file: /var/lib/cluster-client-tls/ca.crt
+ cert_file: /var/lib/cluster-client-tls/tls.crt
+ key_file: /var/lib/cluster-client-tls/tls.key
+ {{- else }}
+ scheme: http
tls_config:
insecure_skip_verify: true
- {{- if .Values.enableTLSCluster }}
- ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- cert_file: /var/lib/pd-client-tls/cert
- key_file: /var/lib/pd-client-tls/key
- scheme: https
{{- end }}
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
@@ -41,11 +45,12 @@ scrape_configs:
action: replace
target_label: __metrics_path__
regex: (.+)
- - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
- action: replace
- regex: ([^:]+)(?::\d+)?;(\d+)
- replacement: $1:$2
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
target_label: __address__
+ replacement: $1.$2-pd-peer:$3
+ action: replace
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
@@ -71,13 +76,17 @@ scrape_configs:
names:
- {{ .Release.Namespace }}
{{- end }}
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ scheme: https
+ tls_config:
+ insecure_skip_verify: false
+ ca_file: /var/lib/cluster-client-tls/ca.crt
+ cert_file: /var/lib/cluster-client-tls/tls.crt
+ key_file: /var/lib/cluster-client-tls/tls.key
+ {{- else }}
+ scheme: http
tls_config:
insecure_skip_verify: true
- {{- if .Values.enableTLSCluster }}
- ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- cert_file: /var/lib/pd-client-tls/cert
- key_file: /var/lib/pd-client-tls/key
- scheme: https
{{- end }}
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
@@ -93,11 +102,12 @@ scrape_configs:
action: replace
target_label: __metrics_path__
regex: (.+)
- - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
- action: replace
- regex: ([^:]+)(?::\d+)?;(\d+)
- replacement: $1:$2
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
target_label: __address__
+ replacement: $1.$2-tidb-peer:$3
+ action: replace
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
@@ -123,16 +133,23 @@ scrape_configs:
names:
- {{ .Release.Namespace }}
{{- end }}
+ scheme: http
tls_config:
insecure_skip_verify: true
-# TiKV doesn't support scheme https for now.
-# And we should fix it after TiKV fix this issue: https://github.com/tikv/tikv/issues/5340
-# {{- if .Values.enableTLSCluster }}
-# ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-# cert_file: /var/lib/pd-client-tls/cert
-# key_file: /var/lib/pd-client-tls/key
-# scheme: https
-# {{- end }}
+ # TiKV doesn't support scheme https for now.
+ # And we should fix it after TiKV fix this issue: https://github.com/tikv/tikv/issues/5340
+ # {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ # scheme: https
+ # tls_config:
+ # insecure_skip_verify: false
+ # ca_file: /var/lib/cluster-client-tls/ca.crt
+ # cert_file: /var/lib/cluster-client-tls/tls.crt
+ # key_file: /var/lib/cluster-client-tls/tls.key
+ # {{- else }}
+ # scheme: http
+ # tls_config:
+ # insecure_skip_verify: true
+ # {{- end }}
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
action: keep
@@ -147,11 +164,12 @@ scrape_configs:
action: replace
target_label: __metrics_path__
regex: (.+)
- - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
- action: replace
- regex: ([^:]+)(?::\d+)?;(\d+)
- replacement: $1:$2
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
target_label: __address__
+ replacement: $1.$2-tikv-peer:$3
+ action: replace
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
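For clarity, the reworked relabeling scrapes each pod through its per-component peer Service rather than the pod IP. With illustrative values, the PD job combines the pod name, the `app.kubernetes.io/instance` label, and the `prometheus.io/port` annotation as follows:

    # pod demo-pd-0, instance demo, port 2379  ->  __address__
    demo-pd-0.demo-pd-peer:2379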
diff --git a/charts/tidb-cluster/templates/config/_pump-config.tpl b/charts/tidb-cluster/templates/config/_pump-config.tpl
index d0b41eddf0..07795a899e 100644
--- a/charts/tidb-cluster/templates/config/_pump-config.tpl
+++ b/charts/tidb-cluster/templates/config/_pump-config.tpl
@@ -19,14 +19,6 @@ heartbeat-interval = {{ .Values.binlog.pump.heartbeatInterval | default 2 }}
# a comma separated list of PD endpoints
pd-urls = "{{ template "cluster.scheme" . }}://{{ template "cluster.name" . }}-pd:2379"
-#[security]
-# Path of file that contains list of trusted SSL CAs for connection with cluster components.
-# ssl-ca = "/path/to/ca.pem"
-# Path of file that contains X509 certificate in PEM format for connection with cluster components.
-# ssl-cert = "/path/to/drainer.pem"
-# Path of file that contains X509 key in PEM format for connection with cluster components.
-# ssl-key = "/path/to/drainer-key.pem"
-#
[storage]
# Set to `true` (default) for best reliability, which prevents data loss when there is a power failure.
sync-log = {{ .Values.binlog.pump.syncLog | default true }}
@@ -43,3 +35,13 @@ sync-log = {{ .Values.binlog.pump.syncLog | default true }}
# write-buffer = 67108864
# write-L0-pause-trigger = 24
# write-L0-slowdown-trigger = 17
+{{ if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+[security]
+# Path of file that contains list of trusted SSL CAs for connection with cluster components.
+ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+# Path of file that contains X509 certificate in PEM format for connection with cluster components.
+ssl-cert = "/var/lib/pump-tls/tls.crt"
+# Path of file that contains X509 key in PEM format for connection with cluster components.
+ssl-key = "/var/lib/pump-tls/tls.key"
+{{- end -}}
+
diff --git a/charts/tidb-cluster/templates/discovery-deployment.yaml b/charts/tidb-cluster/templates/discovery-deployment.yaml
index 38e21e41b0..df6bd7c507 100644
--- a/charts/tidb-cluster/templates/discovery-deployment.yaml
+++ b/charts/tidb-cluster/templates/discovery-deployment.yaml
@@ -29,6 +29,14 @@ spec:
{{- if .Values.rbac.create }}
serviceAccount: {{ template "cluster.name" . }}-discovery
{{- end }}
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+ {{- end }}
{{- end }}
containers:
- name: discovery
diff --git a/charts/tidb-cluster/templates/monitor-deployment.yaml b/charts/tidb-cluster/templates/monitor-deployment.yaml
index 3b7c82c7f4..b297a1553c 100644
--- a/charts/tidb-cluster/templates/monitor-deployment.yaml
+++ b/charts/tidb-cluster/templates/monitor-deployment.yaml
@@ -134,9 +134,9 @@ spec:
- name: prometheus-rules
mountPath: /prometheus-rules
readOnly: false
- {{- if .Values.enableTLSCluster }}
- - name: tls-pd-client
- mountPath: /var/lib/pd-client-tls
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ - name: cluster-client-tls
+ mountPath: /var/lib/cluster-client-tls
readOnly: true
{{- end }}
{{- if .Values.monitor.grafana.create }}
@@ -241,11 +241,11 @@ spec:
name: prometheus-rules
- emptyDir: {}
name: grafana-dashboard
- {{- if .Values.enableTLSCluster }}
- - name: tls-pd-client
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ - name: cluster-client-tls
secret:
defaultMode: 420
- secretName: {{ .Release.Name }}-pd-client
+ secretName: {{ .Release.Name }}-cluster-client-secret
{{- end }}
{{- if .Values.monitor.tolerations }}
tolerations:
diff --git a/charts/tidb-cluster/templates/monitor-rbac.yaml b/charts/tidb-cluster/templates/monitor-rbac.yaml
index 1bb7aca7b2..628100b7c1 100644
--- a/charts/tidb-cluster/templates/monitor-rbac.yaml
+++ b/charts/tidb-cluster/templates/monitor-rbac.yaml
@@ -23,6 +23,16 @@ rules:
resources:
- pods
verbs: ["get", "list", "watch"]
+ {{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" }}
+- apiGroups:
+ - security.openshift.io
+ resourceNames:
+ - anyuid
+ resources:
+ - securitycontextconstraints
+ verbs:
+ - use
+ {{- end }}
{{- if .Values.rbac.crossNamespace }}
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
diff --git a/charts/tidb-cluster/templates/pump-statefulset.yaml b/charts/tidb-cluster/templates/pump-statefulset.yaml
index 90a61b62c9..f73fa3c06d 100644
--- a/charts/tidb-cluster/templates/pump-statefulset.yaml
+++ b/charts/tidb-cluster/templates/pump-statefulset.yaml
@@ -55,6 +55,11 @@ spec:
mountPath: /data
- name: config
mountPath: /etc/pump
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ - name: pump-tls
+ mountPath: /var/lib/pump-tls
+ readOnly: true
+ {{- end }}
resources:
{{ toYaml .Values.binlog.pump.resources | indent 10 }}
{{- if and (ne .Values.timezone "UTC") (ne .Values.timezone "") }}
@@ -73,6 +78,11 @@ spec:
items:
- key: pump-config
path: pump.toml
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ - name: pump-tls
+ secret:
+ secretName: {{ include "pump.tlsSecretName" . }}
+ {{- end }}
volumeClaimTemplates:
- metadata:
name: data
diff --git a/charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml b/charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml
index f7860f3ea7..d99b88ace8 100644
--- a/charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml
+++ b/charts/tidb-cluster/templates/scheduled-backup-cronjob.yaml
@@ -74,7 +74,7 @@ spec:
- name: GOOGLE_APPLICATION_CREDENTIALS
value: /gcp/credentials.json
{{- end }}
- {{- if or .Values.scheduledBackup.ceph .Values.scheduledBackup.s3 }}
+ {{- if or .Values.scheduledBackup.ceph.secretName .Values.scheduledBackup.s3.secretName }}
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
@@ -97,6 +97,14 @@ spec:
name: {{ .Values.scheduledBackup.secretName }}
key: password
restartPolicy: {{ .Values.scheduledBackup.restartPolicy | default "OnFailure" }}
+ {{- if .Values.scheduledBackup.affinity }}
+ affinity:
+{{ toYaml .Values.scheduledBackup.affinity | indent 12 }}
+ {{- end }}
+ {{- if .Values.scheduledBackup.tolerations }}
+ tolerations:
+{{ toYaml .Values.scheduledBackup.tolerations | indent 12 }}
+ {{- end }}
volumes:
- name: data
persistentVolumeClaim:
diff --git a/charts/tidb-cluster/templates/scripts/_initialize_tidb_users.py.tpl b/charts/tidb-cluster/templates/scripts/_initialize_tidb_users.py.tpl
index 290376a493..05051b3c5f 100755
--- a/charts/tidb-cluster/templates/scripts/_initialize_tidb_users.py.tpl
+++ b/charts/tidb-cluster/templates/scripts/_initialize_tidb_users.py.tpl
@@ -26,3 +26,4 @@ if permit_host != '%%':
conn.cursor().execute("update mysql.user set Host=%s where User='root';", (permit_host,))
conn.cursor().execute("flush privileges;")
conn.commit()
+conn.close()
diff --git a/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl
index 3e42ef7091..9901756475 100755
--- a/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl
+++ b/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl
@@ -3,7 +3,8 @@ set -euo pipefail
host=$(getent hosts {{ template "cluster.name" . }}-tidb | head | awk '{print $1}')
backupName=scheduled-backup-`date "+%Y%m%d-%H%M%S"`
-backupPath=/data/${backupName}
+backupBase=/data
+backupPath=${backupBase}/${backupName}
echo "making dir ${backupPath}"
mkdir -p ${backupPath}
@@ -37,10 +38,29 @@ echo "Reset TiKV GC life time to ${gc_life_time}"
/usr/bin/mysql -h${host} -P4000 -u${TIDB_USER} ${password_str} -Nse "select variable_name,variable_value from mysql.tidb where variable_name='tikv_gc_life_time';"
{{- if .Values.scheduledBackup.gcp }}
-uploader \
- --cloud=gcp \
- --bucket={{ .Values.scheduledBackup.gcp.bucket }} \
- --backup-dir=${backupPath}
+# Once we know there are no more credentials that will be logged we can run with -x
+set -x
+bucket={{ .Values.scheduledBackup.gcp.bucket }}
+creds=${GOOGLE_APPLICATION_CREDENTIALS:-""}
+if ! [[ -z $creds ]] ; then
+creds="service_account_file = ${creds}"
+fi
+
+cat <<EOF > /tmp/rclone.conf
+[gcp]
+type = google cloud storage
+bucket_policy_only = true
+$creds
+EOF
+
+cd "${backupBase}"
+{{- if .Values.scheduledBackup.gcp.prefix }}
+tar -cf - "${backupName}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat gcp:${bucket}/{{ .Values.scheduledBackup.gcp.prefix }}/${backupName}/${backupName}.tgz
+{{- else }}
+tar -cf - "${backupName}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat gcp:${bucket}/${backupName}/${backupName}.tgz
+{{- end }}
{{- end }}
{{- if .Values.scheduledBackup.ceph }}
@@ -52,11 +72,26 @@ uploader \
{{- end }}
{{- if .Values.scheduledBackup.s3 }}
-uploader \
- --cloud=aws \
- --region={{ .Values.scheduledBackup.s3.region }} \
- --bucket={{ .Values.scheduledBackup.s3.bucket }} \
- --backup-dir=${backupPath}
+# Once we know there are no more credentials that will be logged we can run with -x
+set -x
+bucket={{ .Values.scheduledBackup.s3.bucket }}
+
+cat <<EOF > /tmp/rclone.conf
+[s3]
+type = s3
+provider = AWS
+env_auth = true
+region = {{ .Values.scheduledBackup.s3.region }}
+EOF
+
+cd "${backupBase}"
+{{- if .Values.scheduledBackup.s3.prefix }}
+tar -cf - "${backupName}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat s3:${bucket}/{{ .Values.scheduledBackup.s3.prefix }}/${backupName}/${backupName}.tgz
+{{- else }}
+tar -cf - "${backupName}" | pigz -p 16 \
+ | rclone --config /tmp/rclone.conf rcat s3:${bucket}/${backupName}/${backupName}.tgz
+{{- end }}
{{- end }}
{{- if and (.Values.scheduledBackup.cleanupAfterUpload) (or (.Values.scheduledBackup.gcp) (or .Values.scheduledBackup.ceph .Values.scheduledBackup.s3)) }}
diff --git a/charts/tidb-cluster/templates/scripts/_start_tidb.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_tidb.sh.tpl
old mode 100644
new mode 100755
index ee79a7594e..0ad0027130
--- a/charts/tidb-cluster/templates/scripts/_start_tidb.sh.tpl
+++ b/charts/tidb-cluster/templates/scripts/_start_tidb.sh.tpl
@@ -26,7 +26,10 @@ then
tail -f /dev/null
fi
+# Use HOSTNAME if POD_NAME is unset for backward compatibility.
+POD_NAME=${POD_NAME:-$HOSTNAME}
ARGS="--store=tikv \
+--advertise-address=${POD_NAME}.${HEADLESS_SERVICE_NAME}.${NAMESPACE}.svc \
--host=0.0.0.0 \
--path=${CLUSTER_NAME}-pd:2379 \
--config=/etc/tidb/tidb.toml
diff --git a/charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl
index d4bb6c590f..806a242be4 100644
--- a/charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl
+++ b/charts/tidb-cluster/templates/scripts/_start_tikv.sh.tpl
@@ -39,6 +39,8 @@ ARGS="--pd={{ template "cluster.scheme" . }}://${CLUSTER_NAME}-pd:2379 \
--config=/etc/tikv/tikv.toml
"
+{{ .Values.tikv.postArgScript }}
+
echo "starting tikv-server ..."
echo "/tikv-server ${ARGS}"
exec /tikv-server ${ARGS}
diff --git a/charts/tidb-cluster/templates/tidb-cluster.yaml b/charts/tidb-cluster/templates/tidb-cluster.yaml
index 6c010a668d..1567b6d9df 100644
--- a/charts/tidb-cluster/templates/tidb-cluster.yaml
+++ b/charts/tidb-cluster/templates/tidb-cluster.yaml
@@ -21,7 +21,10 @@ spec:
pvReclaimPolicy: {{ .Values.pvReclaimPolicy }}
enablePVReclaim: {{ .Values.enablePVReclaim }}
timezone: {{ .Values.timezone | default "UTC" }}
- enableTLSCluster: {{ .Values.enableTLSCluster | default false }}
+{{- if .Values.tlsCluster }}
+ tlsCluster:
+{{ toYaml .Values.tlsCluster | indent 4 }}
+{{- end }}
services:
{{ toYaml .Values.services | indent 4 }}
schedulerName: {{ .Values.schedulerName | default "default-scheduler" }}
@@ -92,7 +95,10 @@ spec:
{{- end }}
maxFailoverCount: {{ .Values.tikv.maxFailoverCount | default 3 }}
tidb:
- enableTLSClient: {{ .Values.tidb.enableTLSClient | default false }}
+ {{- if .Values.tidb.tlsClient }}
+ tlsClient:
+{{ toYaml .Values.tidb.tlsClient | indent 6 }}
+ {{- end }}
replicas: {{ .Values.tidb.replicas }}
image: {{ .Values.tidb.image }}
imagePullPolicy: {{ .Values.tidb.imagePullPolicy | default "IfNotPresent" }}
diff --git a/charts/tidb-cluster/templates/tidb-initializer-job.yaml b/charts/tidb-cluster/templates/tidb-initializer-job.yaml
index f5dab4c2e6..e792dc8037 100644
--- a/charts/tidb-cluster/templates/tidb-initializer-job.yaml
+++ b/charts/tidb-cluster/templates/tidb-initializer-job.yaml
@@ -44,6 +44,8 @@ spec:
fi
done
echo "info: successfully connected to $host:$port, able to initialize TiDB now"
+ resources:
+{{ toYaml .Values.tidb.initializer.resources | indent 10 }}
containers:
- name: mysql-client
image: {{ .Values.mysqlClient.image }}
diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml
index 80dd0bd2c4..ed5d245ff1 100644
--- a/charts/tidb-cluster/values.yaml
+++ b/charts/tidb-cluster/values.yaml
@@ -38,7 +38,7 @@ services:
type: ClusterIP
discovery:
- image: pingcap/tidb-operator:v1.1.0-beta.1
+ image: pingcap/tidb-operator:v1.1.0-rc.2
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -48,6 +48,15 @@ discovery:
cpu: 80m
memory: 50Mi
+ ## affinity defines discovery scheduling rules. The default setting is empty.
+ ## please read the affinity documentation before setting your scheduling rules:
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ affinity: {}
+
+ ## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
+ ## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
+ tolerations: []
+
# Whether enable ConfigMap Rollout management.
# When enabling, change of ConfigMap will trigger a graceful rolling-update of the component.
# This feature is only available in tidb-operator v1.0 or higher.
@@ -55,10 +64,23 @@ discovery:
# if the ConfigMap was not changed.
enableConfigMapRollout: true
-# Whether enable TLS connections between server nodes.
-# When enabled, PD/TiDB/TiKV will use TLS encrypted connections to transfer data between each node,
-# certificates will be generated automatically (if not already present).
-enableTLSCluster: false
+# Whether to enable TLS connections between TiDB server components
+tlsCluster:
+ # The steps to enable this feature:
+ # 1. Generate certificates for the TiDB server components and a client-side certificate for them.
+ # There are multiple ways to generate these certificates:
+ # - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/generate-self-signed-certificates/
+ # - use certificates signed by the K8s built-in certificate signing system: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+ # - or use cert-manager signed certificates: https://cert-manager.io/
+ # 2. Create one Secret object per component, containing the certificates created above.
+ # The name of this Secret must be: --cluster-secret.
+ # For PD: kubectl create secret generic -pd-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt=
+ # For TiKV: kubectl create secret generic -tikv-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt=
+ # For TiDB: kubectl create secret generic -tidb-cluster-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt=
+ # For Client: kubectl create secret generic -cluster-client-secret --namespace= --from-file=tls.crt= --from-file=tls.key= --from-file=ca.crt=
+ # Same for other components.
+ # 3. Then create the TiDB cluster with `tlsCluster.enabled` set to `true`.
+ enabled: false
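+  # For example, a minimal sketch assuming a cluster named "basic" in namespace "tidb" and
+  # certificates already generated under ./certs (all names and paths here are only illustrative):
+  #   kubectl create secret generic basic-pd-cluster-secret --namespace=tidb \
+  #     --from-file=tls.crt=./certs/pd/tls.crt --from-file=tls.key=./certs/pd/tls.key --from-file=ca.crt=./certs/ca.crt
+  #   # ...repeat for the tikv, tidb and cluster client Secrets, then set enabled: true above.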
pd:
# Please refer to https://github.com/pingcap/pd/blob/master/conf/config.toml for the default
@@ -75,9 +97,12 @@ pd:
# pd Service
# we can only specify clusterIP and loadBalancerIP now
- service:
- clusterIP: "None"
-
+ service: {}
+ # type: "< default use global service type >"
+ # loadBalancerIP: ""
+ # clusterIP: ""
+  # annotations: {}
+ # portName: "client"
replicas: 3
image: pingcap/pd:v3.0.8
# storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
@@ -284,6 +309,13 @@ tikv:
# After waiting for 5 minutes, TiDB Operator creates a new TiKV node if this TiKV node is still down.
# maxFailoverCount is used to configure the maximum number of TiKV nodes that TiDB Operator can create when failover occurs.
maxFailoverCount: 3
+  # postArgScript is the script executed after the normal TiKV instance start args are built;
+  # it is recommended to modify the args construction logic here if you have any special needs.
+ postArgScript: |
+ if [ ! -z "${STORE_LABELS:-}" ]; then
+ LABELS=" --labels ${STORE_LABELS} "
+ ARGS="${ARGS}${LABELS}"
+ fi
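+  # For example, a sketch of a customized postArgScript (the extra --status-addr flag below is
+  # only illustrative and not part of the chart defaults):
+  #   postArgScript: |
+  #     if [ ! -z "${STORE_LABELS:-}" ]; then
+  #       LABELS=" --labels ${STORE_LABELS} "
+  #       ARGS="${ARGS}${LABELS}"
+  #     fi
+  #     ARGS="${ARGS} --status-addr=0.0.0.0:20180"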
tidb:
# Please refer to https://github.com/pingcap/tidb/blob/master/config/config.toml.example for the default
@@ -396,9 +428,11 @@ tidb:
service:
type: NodePort
exposeStatus: true
+ # portName: "mysql-client"
# annotations:
# cloud.google.com/load-balancer-type: Internal
separateSlowLog: true
+
slowLogTailer:
image: busybox:1.26.2
resources:
@@ -411,12 +445,12 @@ tidb:
initializer:
resources: {}
- # limits:
- # cpu: 100m
- # memory: 100Mi
- # requests:
- # cpu: 100m
- # memory: 100Mi
+ # limits:
+ # cpu: 100m
+ # memory: 100Mi
+ # requests:
+ # cpu: 100m
+ # memory: 100Mi
# tidb plugin configuration
plugin:
@@ -428,10 +462,22 @@ tidb:
list: ["whitelist-1"]
# Whether enable TLS connection between TiDB server and MySQL client.
- # When enabled, TiDB will accept TLS encrypted connections from MySQL client, certificates will be generated
- # automatically.
- # Note: TLS connection is not forced on the server side, plain connections are also accepted after enableing.
- enableTLSClient: false
+ # https://pingcap.com/docs/stable/how-to/secure/enable-tls-clients/
+ tlsClient:
+ # The steps to enable this feature:
+    # 1. Generate a TiDB server-side certificate and a client-side certificate for the TiDB cluster.
+    #    There are multiple ways to generate certificates:
+    #      - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/enable-tls-clients/
+    #      - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+    #      - or use cert-manager signed certificates: https://cert-manager.io/
+    # 2. Create a K8s Secret object which contains the TiDB server-side certificate created above.
+    #    The name of this Secret must be: <clusterName>-tidb-server-secret.
+    #    kubectl create secret generic <clusterName>-tidb-server-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+    # 3. Create a K8s Secret object which contains the TiDB client-side certificate created above, which will be used by TiDB Operator.
+    #    The name of this Secret must be: <clusterName>-tidb-client-secret.
+    #    kubectl create secret generic <clusterName>-tidb-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+ # 4. Then create the TiDB cluster with `tlsClient.enabled` set to `true`.
+ enabled: false
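+    # For example, a minimal sketch assuming a cluster named "basic" in namespace "tidb" and
+    # certificates under ./certs (all names and paths here are only illustrative):
+    #   kubectl create secret generic basic-tidb-server-secret --namespace=tidb --from-file=tls.crt=./certs/tidb/server.crt --from-file=tls.key=./certs/tidb/server.key --from-file=ca.crt=./certs/ca.crt
+    #   kubectl create secret generic basic-tidb-client-secret --namespace=tidb --from-file=tls.crt=./certs/tidb/client.crt --from-file=tls.key=./certs/tidb/client.key --from-file=ca.crt=./certs/ca.crt
+    #   mysql -uroot -h basic-tidb -P 4000 --ssl-ca=./certs/ca.crt   # verify the TLS connection from a MySQL client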
# mysqlClient is used to set password for TiDB
# it must has Python MySQL client installed
@@ -596,6 +642,7 @@ binlog:
# pump configurations (change to the tags of your pump version),
# just follow the format in the file and configure in the 'config' section
# as below if you want to customize any configuration.
+  # The [security] section will be generated automatically if tlsCluster.enabled is set to true, so users do not need to configure it.
# config: |
# gc = 7
# heartbeat-interval = 2
@@ -683,7 +730,7 @@ binlog:
scheduledBackup:
create: false
# https://github.com/pingcap/tidb-cloud-backup
- mydumperImage: pingcap/tidb-cloud-backup:20191217
+ mydumperImage: pingcap/tidb-cloud-backup:20200229
mydumperImagePullPolicy: IfNotPresent
# storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer.
# different classes might map to quality-of-service levels, or to backup policies,
@@ -724,6 +771,7 @@ scheduledBackup:
# backup to gcp
gcp: {}
# bucket: ""
+ # prefix: ""
# secretName is the name of the secret which stores the gcp service account credentials json file
# The service account must have read/write permission to the above bucket.
# Read the following document to create the service account and download the credentials file as credentials.json:
@@ -744,6 +792,7 @@ scheduledBackup:
s3: {}
# region: ""
# bucket: ""
+ # prefix: ""
# secretName is the name of the secret which stores s3 object store access key and secret key
# You can create the secret by:
# kubectl create secret generic s3-backup-secret --from-literal=access_key= --from-literal=secret_key=
@@ -757,6 +806,15 @@ scheduledBackup:
# cpu: 4000m
# memory: 4Gi
+  ## affinity defines the scheduled backup pod's scheduling rules; it is empty by default.
+  ## Please read the affinity document before setting your scheduling rules:
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ affinity: {}
+
+ ## Tolerations are applied to pods, and allow pods to schedule onto nodes with matching taints.
+ ## refer to https://kubernetes.io/docs/concepts/configuration/taint-and-toleration
+ tolerations: []
+
importer:
create: false
image: pingcap/tidb-lightning:v3.0.8
diff --git a/charts/tidb-drainer/templates/_helpers.tpl b/charts/tidb-drainer/templates/_helpers.tpl
index 082f3615cc..fe2c408e71 100644
--- a/charts/tidb-drainer/templates/_helpers.tpl
+++ b/charts/tidb-drainer/templates/_helpers.tpl
@@ -1,6 +1,14 @@
{{- define "drainer.name" -}}
+{{- if .Values.drainerName -}}
+{{ .Values.drainerName }}
+{{- else -}}
{{ .Values.clusterName }}-{{ .Release.Name }}-drainer
{{- end -}}
+{{- end -}}
+
+{{- define "drainer.tlsSecretName" -}}
+{{ .Values.clusterName }}-drainer-cluster-secret
+{{- end -}}
{{/*
Encapsulate config data for consistent digest calculation
@@ -10,12 +18,25 @@ config-file: |-
{{- if .Values.config }}
{{ .Values.config | indent 2 }}
{{- end -}}
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ [security]
+ ssl-ca = "/var/lib/drainer-tls/ca.crt"
+ ssl-cert = "/var/lib/drainer-tls/tls.crt"
+ ssl-key = "/var/lib/drainer-tls/tls.key"
+ {{- if .Values.tlsCluster.certAllowedCN }}
+ cert-allowed-cn = {{ .Values.tlsCluster.certAllowedCN | toJson }}
+ {{- end -}}
+ {{- end -}}
{{- end -}}
{{- define "drainer-configmap.name" -}}
{{ include "drainer.name" . }}-{{ include "drainer-configmap.data" . | sha256sum | trunc 8 }}
{{- end -}}
+{{- define "cluster.scheme" -}}
+{{ if and .Values.tlsCluster .Values.tlsCluster.enabled }}https{{ else }}http{{ end }}
+{{- end -}}
+
{{- define "helm-toolkit.utils.template" -}}
{{- $name := index . 0 -}}
{{- $context := index . 1 -}}
diff --git a/charts/tidb-drainer/templates/drainer-statefulset.yaml b/charts/tidb-drainer/templates/drainer-statefulset.yaml
index 8d162763f5..e0bb02776a 100644
--- a/charts/tidb-drainer/templates/drainer-statefulset.yaml
+++ b/charts/tidb-drainer/templates/drainer-statefulset.yaml
@@ -46,6 +46,11 @@ spec:
mountPath: /data
- name: config
mountPath: /etc/drainer
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ - name: drainer-tls
+ mountPath: /var/lib/drainer-tls
+ readOnly: true
+ {{- end }}
{{- if and (ne .Values.timezone "UTC") (ne .Values.timezone "") }}
env:
- name: TZ
@@ -60,6 +65,11 @@ spec:
items:
- key: config-file
path: drainer.toml
+ {{- if and .Values.tlsCluster .Values.tlsCluster.enabled }}
+ - name: drainer-tls
+ secret:
+ secretName: {{ include "drainer.tlsSecretName" . }}
+ {{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
diff --git a/charts/tidb-drainer/templates/scripts/_start_drainer.sh.tpl b/charts/tidb-drainer/templates/scripts/_start_drainer.sh.tpl
index ada3d3128f..8f258ce6f3 100644
--- a/charts/tidb-drainer/templates/scripts/_start_drainer.sh.tpl
+++ b/charts/tidb-drainer/templates/scripts/_start_drainer.sh.tpl
@@ -26,7 +26,7 @@ done
/drainer \
-L={{ .Values.logLevel | default "info" }} \
--pd-urls=http://{{ .Values.clusterName }}-pd:2379 \
+-pd-urls={{ include "cluster.scheme" . }}://{{ .Values.clusterName }}-pd:2379 \
-addr=`echo ${HOSTNAME}`.{{ include "drainer.name" . }}:8249 \
-config=/etc/drainer/drainer.toml \
-disable-detect={{ .Values.disableDetect | default false }} \
diff --git a/charts/tidb-drainer/values.yaml b/charts/tidb-drainer/values.yaml
index 66ac70655c..6d951eab8b 100644
--- a/charts/tidb-drainer/values.yaml
+++ b/charts/tidb-drainer/values.yaml
@@ -5,6 +5,11 @@
# timezone is the default system timzone
timezone: UTC
+# Change the name of the statefulset and pod
+# The default is clusterName-ReleaseName-drainer
+# Do not change the name of an existing running drainer: this is unsupported.
+# drainerName:
+
# clusterName is the TiDB cluster name that should backup from or restore to.
clusterName: demo
clusterVersion: v3.0.8
@@ -24,7 +29,26 @@ disableDetect: false
# if drainer donesn't have checkpoint, use initial commitTS to initial checkpoint
initialCommitTs: 0
+# Whether to enable TLS connections between TiDB server components
+tlsCluster:
+ # The steps to enable this feature:
+  # 1. Generate the Drainer certificate.
+  #    There are multiple ways to generate these certificates:
+  #      - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/generate-self-signed-certificates/
+  #      - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+  #      - or use cert-manager signed certificates: https://cert-manager.io/
+  # 2. Create one Secret object for Drainer which contains the certificate created above.
+  #    The name of this Secret must be: <clusterName>-drainer-cluster-secret.
+  #    For Drainer: kubectl create secret generic <clusterName>-drainer-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+ # 3. Then create the Drainer cluster with `tlsCluster.enabled` set to `true`.
+ enabled: false
+
+  # certAllowedCN is the list of allowed Common Names
+ certAllowedCN: []
+ # - TiDB
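+  # For example, a minimal sketch assuming clusterName "demo" in namespace "tidb" and
+  # certificates under ./certs (names and paths here are only illustrative):
+  #   kubectl create secret generic demo-drainer-cluster-secret --namespace=tidb \
+  #     --from-file=tls.crt=./certs/drainer/tls.crt --from-file=tls.key=./certs/drainer/tls.key --from-file=ca.crt=./certs/ca.crt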
+
# Refer to https://github.com/pingcap/tidb-binlog/blob/master/cmd/drainer/drainer.toml
+# The [security] section will be generated automatically if tlsCluster.enabled is set to true, so users do not need to configure it.
config: |
detect-interval = 10
compressor = ""
diff --git a/charts/tidb-lightning/templates/job.yaml b/charts/tidb-lightning/templates/job.yaml
index 4a4b46433d..2f5e9a8ba7 100644
--- a/charts/tidb-lightning/templates/job.yaml
+++ b/charts/tidb-lightning/templates/job.yaml
@@ -28,6 +28,9 @@ spec:
{{ toYaml .Values.annotations | indent 8 }}
{{- end }}
spec:
+ {{- if .Values.serviceAccount }}
+ serviceAccountName: {{ .Values.serviceAccount }}
+ {{- end }}
{{ if and .Values.dataSource.local.hostPath .Values.dataSource.local.nodeName -}}
nodeName: {{ .Values.dataSource.local.nodeName }}
{{ else if not .Values.dataSource.adhoc.pvcName -}}
@@ -134,6 +137,10 @@ spec:
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 6 }}
+ {{- end }}
+ {{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
diff --git a/charts/tidb-lightning/templates/rclone-conf.yaml b/charts/tidb-lightning/templates/rclone-conf.yaml
index 77cf277ed6..90fc7b8672 100644
--- a/charts/tidb-lightning/templates/rclone-conf.yaml
+++ b/charts/tidb-lightning/templates/rclone-conf.yaml
@@ -1,10 +1,9 @@
+{{- if .Values.dataSource.remote.rcloneConfig }}
apiVersion: v1
kind: ConfigMap
metadata:
name: rclone-{{ include "tidb-lightning.name" . }}
-type: Opaque
data:
config-file: |-
- {{- if .Values.dataSource.remote.rcloneConfig }}
{{ .Values.dataSource.remote.rcloneConfig | indent 4 }}
- {{- end -}}
+{{- end }}
diff --git a/charts/tidb-lightning/templates/scripts/_start_data_retriever.sh.tpl b/charts/tidb-lightning/templates/scripts/_start_data_retriever.sh.tpl
index 96e16a1bc9..4529f7ce09 100644
--- a/charts/tidb-lightning/templates/scripts/_start_data_retriever.sh.tpl
+++ b/charts/tidb-lightning/templates/scripts/_start_data_retriever.sh.tpl
@@ -1,4 +1,8 @@
set -euo pipefail
+{{ if .Values.dataSource.remote.directory }}
+# rclone sync skips identical files automatically
+rclone --config /etc/rclone/rclone.conf sync -P {{ .Values.dataSource.remote.directory }} /data
+{{- else -}}
filename=$(basename {{ .Values.dataSource.remote.path }})
if find /data -name metadata | egrep '.*'; then
echo "data already exist"
@@ -7,3 +11,4 @@ else
rclone --config /etc/rclone/rclone.conf copy -P {{ .Values.dataSource.remote.path }} /data
cd /data && tar xzvf ${filename}
fi
+{{- end -}}
diff --git a/charts/tidb-lightning/templates/scripts/_start_lightning.sh.tpl b/charts/tidb-lightning/templates/scripts/_start_lightning.sh.tpl
index ba8d2708cc..32ec7be54b 100644
--- a/charts/tidb-lightning/templates/scripts/_start_lightning.sh.tpl
+++ b/charts/tidb-lightning/templates/scripts/_start_lightning.sh.tpl
@@ -2,6 +2,16 @@
data_dir={{ .Values.dataSource.local.hostPath }}
{{- else if .Values.dataSource.adhoc.pvcName -}}
data_dir=/var/lib/tidb-lightning/{{ .Values.dataSource.adhoc.backupName | default .Values.dataSource.adhoc.pvcName }}
+{{- else if .Values.dataSource.remote.directory -}}
+data_dir=/var/lib/tidb-lightning
+if [ -z "$(ls -A ${data_dir})" ]; then
+  if [ ! -z "${FAIL_FAST:-}" ]; then
+    exit 1
+  else
+    echo "No files in the data dir, please exec into the container to diagnose"
+ tail -f /dev/null
+ fi
+fi
{{- else -}}
data_dir=$(dirname $(find /var/lib/tidb-lightning -name metadata 2>/dev/null) 2>/dev/null)
if [ -z $data_dir ]; then
diff --git a/charts/tidb-lightning/values.yaml b/charts/tidb-lightning/values.yaml
index d294728992..fc95e7b367 100644
--- a/charts/tidb-lightning/values.yaml
+++ b/charts/tidb-lightning/values.yaml
@@ -25,11 +25,13 @@ dataSource:
# pvcName: tidb-cluster-scheduled-backup
# backupName: scheduled-backup-20190822-041004
remote:
- rcloneImage: tynor88/rclone
+ rcloneImage: pingcap/tidb-cloud-backup:20200229
storageClassName: local-storage
storage: 100Gi
secretName: cloud-storage-secret
path: s3:bench-data-us/sysbench/sbtest_16_1e7.tar.gz
+    # directory supports downloading all files in a remote directory; it shadows dataSource.remote.path if present
+ # directory: s3:bench-data-us
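+    # For example (a sketch; the bucket path is only illustrative): with `directory: s3:bench-data-us/sysbench`,
+    # every file under that remote directory is synced into /data with `rclone sync`, and `path` above is ignored.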
# If rcloneConfig is configured, then `secretName` will be ignored,
# `rcloneConfig` should only be used for the cases where no sensitive
# information need to be configured, e.g. the configuration as below,
@@ -72,6 +74,9 @@ affinity: {}
backend: importer # importer | tidb
+# Specify a Service Account for lightning
+# serviceAccount:
+
config: |
[lightning]
level = "info"
diff --git a/charts/tidb-operator/templates/admission/admission-webhook-deployment.yaml b/charts/tidb-operator/templates/admission/admission-webhook-deployment.yaml
index 228cdb590b..1efd651728 100644
--- a/charts/tidb-operator/templates/admission/admission-webhook-deployment.yaml
+++ b/charts/tidb-operator/templates/admission/admission-webhook-deployment.yaml
@@ -30,9 +30,11 @@ spec:
imagePullPolicy: {{ .Values.imagePullPolicy | default "IfNotPresent" }}
command:
- /usr/local/bin/tidb-admission-webhook
+          # use a port > 1024 so that we can run it as a non-root user
+ - --secure-port=6443
{{- if eq .Values.admissionWebhook.apiservice.insecureSkipTLSVerify false }}
- - --tls-cert-file=/var/serving-cert/cert.pem
- - --tls-private-key-file=/var/serving-cert/key.pem
+ - --tls-cert-file=/var/serving-cert/tls.crt
+ - --tls-private-key-file=/var/serving-cert/tls.key
{{- end }}
{{- if .Values.features }}
- --features={{ join "," .Values.features }}
@@ -41,7 +43,7 @@ spec:
failureThreshold: 5
httpGet:
path: /healthz
- port: 443
+ port: 6443
scheme: HTTPS
initialDelaySeconds: 5
timeoutSeconds: 5
@@ -49,20 +51,32 @@ spec:
failureThreshold: 5
httpGet:
path: /healthz
- port: 443
+ port: 6443
scheme: HTTPS
initialDelaySeconds: 5
timeoutSeconds: 5
- {{- if eq .Values.admissionWebhook.apiservice.insecureSkipTLSVerify false }}
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
volumeMounts:
+ {{- if eq .Values.admissionWebhook.apiservice.insecureSkipTLSVerify false }}
- mountPath: /var/serving-cert
name: serving-cert
+ {{- else }}
+ - mountPath: /apiserver.local.config
+ name: apiserver-local-config
{{- end }}
- {{- if eq .Values.admissionWebhook.apiservice.insecureSkipTLSVerify false }}
volumes:
+ {{- if eq .Values.admissionWebhook.apiservice.insecureSkipTLSVerify false }}
- name: serving-cert
secret:
defaultMode: 420
- secretName: tidb-admission-webhook-certs
+ secretName: {{ .Values.admissionWebhook.apiservice.tlsSecret }}
+ {{- else }}
+      # rootfs may be read-only, so we need an emptyDir volume to store self-signed certificates, etc.
+ - name: apiserver-local-config
+ emptyDir: {}
{{- end }}
{{- end }}
diff --git a/charts/tidb-operator/templates/admission/admission-webhook-rbac.yaml b/charts/tidb-operator/templates/admission/admission-webhook-rbac.yaml
index 748be3a295..4d78b8e603 100644
--- a/charts/tidb-operator/templates/admission/admission-webhook-rbac.yaml
+++ b/charts/tidb-operator/templates/admission/admission-webhook-rbac.yaml
@@ -27,6 +27,9 @@ rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch", "update"]
+ - apiGroups: [""]
+ resources: ["secrets","configmaps"]
+ verbs: ["get", "list"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create","patch","update"]
diff --git a/charts/tidb-operator/templates/admission/admission-webhook-registration.yaml b/charts/tidb-operator/templates/admission/admission-webhook-registration.yaml
index 54def4b222..f39dc9262d 100644
--- a/charts/tidb-operator/templates/admission/admission-webhook-registration.yaml
+++ b/charts/tidb-operator/templates/admission/admission-webhook-registration.yaml
@@ -13,7 +13,7 @@ spec:
{{- if .Values.admissionWebhook.apiservice.insecureSkipTLSVerify }}
insecureSkipTLSVerify: true
{{- else }}
- caBundle: {{ .Values.admissionWebhook.apiservice.cert | b64enc }}
+ caBundle: {{ .Values.admissionWebhook.apiservice.caBundle }}
{{- end }}
group: admission.tidb.pingcap.com
groupPriorityMinimum: 1000
@@ -23,11 +23,11 @@ spec:
namespace: {{ .Release.Namespace }}
version: v1alpha1
---
-{{- if .Values.admissionWebhook.hooksEnabled.pods }}
+{{- if .Values.admissionWebhook.validation.pods }}
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
- name: validation-delete-tidb-admission-webhook-cfg
+ name: validation-tidb-pod-webhook-cfg
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -35,34 +35,36 @@ metadata:
app.kubernetes.io/component: admission-webhook
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
webhooks:
- - name: delete.podadmission.tidb.pingcap.com
+ - name: podadmission.tidb.pingcap.com
{{- if semverCompare ">=1.15-0" .Capabilities.KubeVersion.GitVersion }}
objectSelector:
matchLabels:
"app.kubernetes.io/managed-by": "tidb-operator"
"app.kubernetes.io/name": "tidb-cluster"
{{- end }}
- failurePolicy: {{ .Values.admissionWebhook.failurePolicy.deletePod | default "Fail" }}
+ failurePolicy: {{ .Values.admissionWebhook.failurePolicy.validation | default "Fail" }}
clientConfig:
service:
name: kubernetes
namespace: default
path: "/apis/admission.tidb.pingcap.com/v1alpha1/admissionreviews"
{{- if .Values.admissionWebhook.cabundle }}
- caBundle: {{ .Values.admissionWebhook.cabundle | b64enc }}
+ caBundle: {{ .Values.admissionWebhook.cabundle }}
{{- else }}
caBundle: null
{{- end }}
rules:
- - operations: ["DELETE"]
+ - operations: ["DELETE","CREATE"]
apiGroups: [""]
apiVersions: ["v1"]
resources: ["pods"]
+{{- end }}
---
+{{- if .Values.admissionWebhook.validation.statefulSets }}
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
- name: validation-create-tidb-admission-webhook-cfg
+ name: validation-tidb-statefulset-webhook-cfg
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -70,36 +72,40 @@ metadata:
app.kubernetes.io/component: admission-webhook
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
webhooks:
- - name: create.podadmission.tidb.pingcap.com
+ - name: stsadmission.tidb.pingcap.com
{{- if semverCompare ">=1.15-0" .Capabilities.KubeVersion.GitVersion }}
objectSelector:
matchLabels:
"app.kubernetes.io/managed-by": "tidb-operator"
"app.kubernetes.io/name": "tidb-cluster"
{{- end }}
- failurePolicy: {{ .Values.admissionWebhook.failurePolicy.createPod | default "Ignore" }}
+ failurePolicy: {{ .Values.admissionWebhook.failurePolicy.validation | default "Ignore" }}
clientConfig:
service:
name: kubernetes
namespace: default
path: "/apis/admission.tidb.pingcap.com/v1alpha1/admissionreviews"
{{- if .Values.admissionWebhook.cabundle }}
- caBundle: {{ .Values.admissionWebhook.cabundle | b64enc }}
+ caBundle: {{ .Values.admissionWebhook.cabundle }}
{{- else }}
caBundle: null
{{- end }}
rules:
- - operations: ["CREATE"]
- apiGroups: [""]
- apiVersions: ["v1"]
- resources: ["pods"]
+ - operations: [ "UPDATE" ]
+ apiGroups: [ "apps", "" ]
+ apiVersions: ["v1beta1", "v1"]
+ resources: ["statefulsets"]
+ - operations: [ "UPDATE" ]
+ apiGroups: [ "apps.pingcap.com"]
+ apiVersions: ["v1alpha1", "v1"]
+ resources: ["statefulsets"]
{{- end }}
---
-{{- if .Values.admissionWebhook.hooksEnabled.statefulSets }}
+{{- if .Values.admissionWebhook.validation.pingcapResources }}
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
- name: validation-update-tidb-admission-webhook-cfg
+ name: pingcap-tidb-resources-validating
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -107,34 +113,30 @@ metadata:
app.kubernetes.io/component: admission-webhook
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
webhooks:
- - name: update.stsadmission.tidb.pingcap.com
- failurePolicy: {{ .Values.admissionWebhook.failurePolicy.updateStatefulSet | default "Ignore" }}
+ - name: validating.admission.tidb.pingcap.com
+ failurePolicy: {{ .Values.admissionWebhook.failurePolicy.validation | default "Ignore" }}
clientConfig:
service:
name: kubernetes
namespace: default
path: "/apis/admission.tidb.pingcap.com/v1alpha1/admissionreviews"
{{- if .Values.admissionWebhook.cabundle }}
- caBundle: {{ .Values.admissionWebhook.cabundle | b64enc }}
+ caBundle: {{ .Values.admissionWebhook.cabundle }}
{{- else }}
caBundle: null
{{- end }}
rules:
- - operations: [ "UPDATE" ]
- apiGroups: [ "apps", "" ]
- apiVersions: ["v1beta1", "v1"]
- resources: ["statefulsets"]
- - operations: [ "UPDATE" ]
- apiGroups: [ "apps.pingcap.com"]
- apiVersions: ["v1alpha1", "v1"]
- resources: ["statefulsets"]
+ - operations: [ "UPDATE", "CREATE" ]
+ apiGroups: [ "pingcap.com"]
+ apiVersions: ["v1alpha1"]
+ resources: ["tidbclusters"]
{{- end }}
---
-{{- if .Values.admissionWebhook.hooksEnabled.validating }}
+{{- if .Values.admissionWebhook.mutation.pingcapResources }}
apiVersion: admissionregistration.k8s.io/v1beta1
-kind: ValidatingWebhookConfiguration
+kind: MutatingWebhookConfiguration
metadata:
- name: pingcap-resources-validating
+  name: pingcap-tidb-resources-defaulting
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -142,15 +144,15 @@ metadata:
app.kubernetes.io/component: admission-webhook
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
webhooks:
- - name: validating.admission.tidb.pingcap.com
- failurePolicy: {{ .Values.admissionWebhook.failurePolicy.validating | default "Ignore" }}
+ - name: defaulting.admission.tidb.pingcap.com
+ failurePolicy: {{ .Values.admissionWebhook.failurePolicy.mutation | default "Ignore" }}
clientConfig:
service:
name: kubernetes
namespace: default
- path: "/apis/admission.tidb.pingcap.com/v1alpha1/admissionreviews"
+ path: "/apis/admission.tidb.pingcap.com/v1alpha1/mutatingreviews"
{{- if .Values.admissionWebhook.cabundle }}
- caBundle: {{ .Values.admissionWebhook.cabundle | b64enc }}
+ caBundle: {{ .Values.admissionWebhook.cabundle }}
{{- else }}
caBundle: null
{{- end }}
@@ -161,11 +163,11 @@ webhooks:
resources: ["tidbclusters"]
{{- end }}
---
-{{- if .Values.admissionWebhook.hooksEnabled.defaulting }}
+{{- if .Values.admissionWebhook.mutation.pods }}
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
- name: pingcap-resources-defaulitng
+ name: mutation-tidb-pod-webhook-cfg
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
@@ -173,22 +175,28 @@ metadata:
app.kubernetes.io/component: admission-webhook
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
webhooks:
- - name: defaulting.dmission.tidb.pingcap.com
- failurePolicy: {{ .Values.admissionWebhook.failurePolicy.defaulting | default "Ignore" }}
+ - name: podadmission.tidb.pingcap.com
+ {{- if semverCompare ">=1.15-0" .Capabilities.KubeVersion.GitVersion }}
+ objectSelector:
+ matchLabels:
+ "app.kubernetes.io/managed-by": "tidb-operator"
+ "app.kubernetes.io/name": "tidb-cluster"
+ {{- end }}
+ failurePolicy: {{ .Values.admissionWebhook.failurePolicy.mutation | default "Ignore" }}
clientConfig:
service:
name: kubernetes
namespace: default
path: "/apis/admission.tidb.pingcap.com/v1alpha1/mutatingreviews"
{{- if .Values.admissionWebhook.cabundle }}
- caBundle: {{ .Values.admissionWebhook.cabundle | b64enc }}
+ caBundle: {{ .Values.admissionWebhook.cabundle }}
{{- else }}
caBundle: null
{{- end }}
rules:
- - operations: [ "UPDATE", "CREATE" ]
- apiGroups: [ "pingcap.com"]
- apiVersions: ["v1alpha1"]
- resources: ["tidbclusters"]
+ - operations: ["CREATE"]
+ apiGroups: [""]
+ apiVersions: ["v1"]
+ resources: ["pods"]
{{- end }}
{{- end }}
diff --git a/charts/tidb-operator/templates/admission/admission-webhook-secret.yaml b/charts/tidb-operator/templates/admission/admission-webhook-secret.yaml
deleted file mode 100644
index 5723299e68..0000000000
--- a/charts/tidb-operator/templates/admission/admission-webhook-secret.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{{- if and ( .Values.admissionWebhook.create ) ( eq .Values.admissionWebhook.apiservice.insecureSkipTLSVerify false ) }}
-apiVersion: v1
-kind: Secret
-metadata:
- name: tidb-admission-webhook-certs
- labels:
- app.kubernetes.io/name: {{ template "chart.name" . }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/component: admission-cert
- helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-data:
- cert.pem: {{ .Values.admissionWebhook.apiservice.cert | b64enc }}
- key.pem: {{ .Values.admissionWebhook.apiservice.key | b64enc }}
-{{- end }}
diff --git a/charts/tidb-operator/templates/admission/admission-webhook-service.yaml b/charts/tidb-operator/templates/admission/admission-webhook-service.yaml
index c4fa485745..fa64e37f29 100644
--- a/charts/tidb-operator/templates/admission/admission-webhook-service.yaml
+++ b/charts/tidb-operator/templates/admission/admission-webhook-service.yaml
@@ -13,7 +13,7 @@ spec:
ports:
- name: https-webhook # optional
port: 443
- targetPort: 443
+ targetPort: 6443
selector:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/charts/tidb-operator/templates/admission/pre-delete-job.yaml b/charts/tidb-operator/templates/admission/pre-delete-job.yaml
index 26a9fe3a46..205c407d2c 100644
--- a/charts/tidb-operator/templates/admission/pre-delete-job.yaml
+++ b/charts/tidb-operator/templates/admission/pre-delete-job.yaml
@@ -1,4 +1,4 @@
-{{- if and ( .Values.admissionWebhook.create ) ( .Values.admissionWebhook.hooksEnabled.pods ) }}
+{{- if and ( .Values.admissionWebhook.create ) ( .Values.admissionWebhook.validation.pods ) }}
apiVersion: v1
kind: ServiceAccount
metadata:
@@ -96,5 +96,5 @@ spec:
- "-c"
- |
set -e
- kubectl delete validatingWebhookConfigurations.admissionregistration.k8s.io validation-delete-tidb-admission-webhook-cfg || true
+ kubectl delete validatingWebhookConfigurations.admissionregistration.k8s.io validation-tidb-pod-webhook-cfg || true
{{- end }}
diff --git a/charts/tidb-operator/templates/controller-manager-deployment.yaml b/charts/tidb-operator/templates/controller-manager-deployment.yaml
index 7de4a59a79..56a86edfe2 100644
--- a/charts/tidb-operator/templates/controller-manager-deployment.yaml
+++ b/charts/tidb-operator/templates/controller-manager-deployment.yaml
@@ -38,9 +38,15 @@ spec:
{{- end }}
- -tidb-discovery-image={{ .Values.operatorImage }}
- -cluster-scoped={{ .Values.clusterScoped }}
- - -auto-failover={{ .Values.controllerManager.autoFailover | default true }}
+ {{- if eq .Values.controllerManager.autoFailover true }}
+ - -auto-failover=true
+ {{- end }}
+ {{- if eq .Values.controllerManager.autoFailover false }}
+ - -auto-failover=false
+ {{- end }}
- -pd-failover-period={{ .Values.controllerManager.pdFailoverPeriod | default "5m" }}
- -tikv-failover-period={{ .Values.controllerManager.tikvFailoverPeriod | default "5m" }}
+ - -tiflash-failover-period={{ .Values.controllerManager.tiflashFailoverPeriod | default "5m" }}
- -tidb-failover-period={{ .Values.controllerManager.tidbFailoverPeriod | default "5m" }}
- -v={{ .Values.controllerManager.logLevel }}
{{- if .Values.testMode }}
@@ -49,7 +55,7 @@ spec:
{{- if .Values.features }}
- -features={{ join "," .Values.features }}
{{- end }}
- {{- if and ( .Values.admissionWebhook.create ) ( .Values.admissionWebhook.hooksEnabled.pods ) }}
+ {{- if and ( .Values.admissionWebhook.create ) ( .Values.admissionWebhook.validation.pods ) }}
- -pod-webhook-enabled=true
{{- end }}
env:
diff --git a/charts/tidb-operator/templates/controller-manager-rbac.yaml b/charts/tidb-operator/templates/controller-manager-rbac.yaml
index 7e4930ef9c..079e897a0f 100644
--- a/charts/tidb-operator/templates/controller-manager-rbac.yaml
+++ b/charts/tidb-operator/templates/controller-manager-rbac.yaml
@@ -1,3 +1,6 @@
+{{/*
+Delete permission is required in OpenShift because we can't own resources we created if we can't delete them.
+*/}}
{{- if .Values.rbac.create }}
kind: ServiceAccount
apiVersion: v1
@@ -29,16 +32,16 @@ rules:
verbs: ["*"]
- apiGroups: [""]
resources: ["endpoints","configmaps"]
- verbs: ["create", "get", "list", "watch", "update"]
+ verbs: ["create", "get", "list", "watch", "update","delete"]
- apiGroups: [""]
resources: ["serviceaccounts"]
- verbs: ["create","get","update"]
+ verbs: ["create","get","update","delete"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["secrets"]
- verbs: ["create", "get", "list", "watch"]
+ verbs: ["create", "update", "get", "list", "watch","delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
@@ -71,22 +74,16 @@ rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch","update"]
-- apiGroups: ["certificates.k8s.io"]
- resources: ["certificatesigningrequests"]
- verbs: ["create", "get", "list", "watch", "delete"]
-- apiGroups: ["certificates.k8s.io"]
- resources: ["certificatesigningrequests/approval", "certificatesigningrequests/status"]
- verbs: ["update"]
{{/*
Allow controller manager to escalate its privileges to other subjects, the subjects may never have privilege over the controller.
Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#privilege-escalation-prevention-and-bootstrapping
*/}}
- apiGroups: ["rbac.authorization.k8s.io"]
resources: [clusterroles,roles]
- verbs: ["escalate","create","get","update"]
+ verbs: ["escalate","create","get","update", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["rolebindings","clusterrolebindings"]
- verbs: ["create","get","update"]
+ verbs: ["create","get","update", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -126,16 +123,16 @@ rules:
verbs: ["*"]
- apiGroups: [""]
resources: ["endpoints","configmaps"]
- verbs: ["create", "get", "list", "watch", "update"]
+ verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: [""]
resources: ["serviceaccounts"]
- verbs: ["create","get","update"]
+ verbs: ["create","get","update","delete"]
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["secrets"]
- verbs: ["create", "get", "list", "watch"]
+ verbs: ["create", "update", "get", "list", "watch", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
@@ -153,10 +150,10 @@ rules:
verbs: ["*"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles"]
- verbs: ["escalate","create","get","update"]
+ verbs: ["escalate","create","get","update", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["rolebindings"]
- verbs: ["create","get","update"]
+ verbs: ["create","get","update", "delete"]
{{- if .Values.features | has "AdvancedStatefulSet=true" }}
- apiGroups:
- apps.pingcap.com
diff --git a/charts/tidb-operator/values.yaml b/charts/tidb-operator/values.yaml
index b98fa674af..33c53aac78 100644
--- a/charts/tidb-operator/values.yaml
+++ b/charts/tidb-operator/values.yaml
@@ -12,11 +12,11 @@ rbac:
timezone: UTC
# operatorImage is TiDB Operator image
-operatorImage: pingcap/tidb-operator:v1.1.0-beta.1
+operatorImage: pingcap/tidb-operator:v1.1.0-rc.2
imagePullPolicy: IfNotPresent
# tidbBackupManagerImage is tidb backup manager image
-# tidbBackupManagerImage: pingcap/tidb-backup-manager:latest
+tidbBackupManagerImage: pingcap/tidb-backup-manager:v1.1.0-rc.2
#
# Enable or disable tidb-operator features:
@@ -58,6 +58,8 @@ controllerManager:
tikvFailoverPeriod: 5m
# tidb failover period default(5m)
tidbFailoverPeriod: 5m
+ # tiflash failover period default(5m)
+ tiflashFailoverPeriod: 5m
## affinity defines pod scheduling rules,affinity default settings is empty.
## please read the affinity document before set your scheduling rule:
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
@@ -182,10 +184,11 @@ admissionWebhook:
rbac:
create: true
## jobImage is to indicate the image used in `pre-delete-job.yaml`
- ## if admissionWebhook.create and admissionWebhook.hooksEnabled.pods are both enabled,
+ ## if admissionWebhook.create and admissionWebhook.validation.pods are both enabled,
## The pre-delete-job would delete the validationWebhookConfiguration using this image
jobImage: "bitnami/kubectl:latest"
- hooksEnabled:
+  ## the validation webhook checks requests for the given resources and operations
+ validation:
## statefulsets hook would check requests for updating tidbcluster's statefulsets
## If enabled it, the statefulsets of tidbcluseter would update in partition by tidbcluster's annotation
statefulSets: false
@@ -193,36 +196,41 @@ admissionWebhook:
## if enabled it, the pods of tidbcluster would safely created or deleted by webhook instead of controller
pods: true
## validating hook validates the correctness of the resources under pingcap.com group
- validating: false
+ pingcapResources: false
+  ## the mutation webhook mutates requests for the given resources and operations
+  mutation:
+    ## the pods mutation hook mutates pods; currently it is used for TiKV auto-scaling.
+ ## refer to https://github.com/pingcap/tidb-operator/issues/1651
+ pods: true
## defaulting hook set default values for the the resources under pingcap.com group
- defaulting: false
+ pingcapResources: true
## failurePolicy are applied to ValidatingWebhookConfiguration which affect tidb-admission-webhook
## refer to https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy
failurePolicy:
- ## deletePod Webhook would check the deleting request of tidbcluster pod and the failurePolicy is recommended as Fail
- deletePod: Fail
- ## createPod Webhook would check the creating request of tidbcluster pod and the failurePolicy is recommended as Ignore
- createPod: Ignore
- ## updateStatefulSet Webhook would check the updating request of tidbcluster statefulset and the failurePolicy is recommended as Ignore
- updateStatefulSet: Ignore
- ## validation hook validates the correctness of the resources under pingcap.com group
- validating: Ignore
- ## defaulting hook set default values for the the resources under pingcap.com group
- defaulting: Ignore
+    ## the validation webhook checks requests for the given resources.
+    ## If the kubernetes api-server version is >= 1.15.0, we recommend setting the failurePolicy to Fail; otherwise, to Ignore.
+    validation: Ignore
+    ## the mutation webhook mutates requests for the given resources.
+    ## If the kubernetes api-server version is >= 1.15.0, we recommend setting the failurePolicy to Fail; otherwise, to Ignore.
+    mutation: Ignore
## tidb-admission-webhook deployed as kubernetes apiservice server
## refer to https://github.com/openshift/generic-admission-server
apiservice:
## apiservice config
## refer to https://kubernetes.io/docs/tasks/access-kubernetes-api/configure-aggregation-layer/#contacting-the-extension-apiserver
insecureSkipTLSVerify: true
- ## The key and cert for `tidb-admission-webook.` Service.
+    ## The Secret includes the TLS CA, cert and key for the `tidb-admission-webhook.<namespace>.svc` Service.
## If insecureSkipTLSVerify is true, this would be ignored.
- cert: ""
- key: ""
+    ## You can create the TLS Secret by:
+    ##   kubectl create secret generic <secret-name> --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+    tlsSecret: ""
+    ## The caBundle for the webhook apiservice; you can get it from the Secret created previously:
+    ##   kubectl get secret <secret-name> --namespace=<namespace> -o=jsonpath='{.data.ca\.crt}'
+    caBundle: ""
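+    ## For example, a sketch assuming the chart is released in namespace "tidb-admin" with a Secret
+    ## named "tidb-admission-webhook-certs" (both names and the cert paths are only illustrative):
+    ##   kubectl create secret generic tidb-admission-webhook-certs --namespace=tidb-admin --from-file=tls.crt=./certs/webhook/tls.crt --from-file=tls.key=./certs/webhook/tls.key --from-file=ca.crt=./certs/ca.crt
+    ##   kubectl get secret tidb-admission-webhook-certs --namespace=tidb-admin -o=jsonpath='{.data.ca\.crt}'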
## certProvider indicate the key and cert for the webhook configuration to communicate with `kubernetes.default` service.
## If your kube-apiserver's version >= 1.13.0, you can leave cabundle empty and the kube-apiserver
## would trust the roots on the apiserver.
## refer to https://github.com/kubernetes/api/blob/master/admissionregistration/v1/types.go#L529
## or you can get the cabundle by:
- ## kubectl get configmap -n kube-system extension-apiserver-authentication -o=jsonpath='{.data.client-ca-file}'
+ ## kubectl get configmap -n kube-system extension-apiserver-authentication -o=jsonpath='{.data.client-ca-file}' | base64 | tr -d '\n'
cabundle: ""
diff --git a/charts/tikv-importer/.helmignore b/charts/tikv-importer/.helmignore
new file mode 100644
index 0000000000..f0c1319444
--- /dev/null
+++ b/charts/tikv-importer/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/charts/tikv-importer/Chart.yaml b/charts/tikv-importer/Chart.yaml
new file mode 100644
index 0000000000..0c24c3ce3e
--- /dev/null
+++ b/charts/tikv-importer/Chart.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+description: A Helm chart for TiKV Importer
+name: tikv-importer
+version: dev
+home: https://github.com/pingcap/tidb-operator
+sources:
+ - https://github.com/pingcap/tidb-operator
+keywords:
+ - newsql
+ - htap
+ - database
+ - mysql
+ - raft
diff --git a/charts/tikv-importer/templates/_helpers.tpl b/charts/tikv-importer/templates/_helpers.tpl
new file mode 100644
index 0000000000..2372f181e8
--- /dev/null
+++ b/charts/tikv-importer/templates/_helpers.tpl
@@ -0,0 +1,21 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "chart.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Encapsulate tikv-importer configmap data for consistent digest calculation
+*/}}
+{{- define "importer-configmap.data" -}}
+config-file: |-
+ {{- if .Values.config }}
+{{ .Values.config | indent 2 }}
+ {{- end -}}
+{{- end -}}
+
+{{- define "importer-configmap.data-digest" -}}
+{{ include "importer-configmap.data" . | sha256sum | trunc 8 }}
+{{- end -}}
diff --git a/charts/tikv-importer/templates/tikv-importer-configmap.yaml b/charts/tikv-importer/templates/tikv-importer-configmap.yaml
new file mode 100644
index 0000000000..1bbcabafb0
--- /dev/null
+++ b/charts/tikv-importer/templates/tikv-importer-configmap.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.clusterName }}-importer-{{ template "importer-configmap.data-digest" . }}
+ labels:
+ app.kubernetes.io/name: {{ template "chart.name" . }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: importer
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+data:
+{{ include "importer-configmap.data" . | indent 2 }}
diff --git a/charts/tikv-importer/templates/tikv-importer-service.yaml b/charts/tikv-importer/templates/tikv-importer-service.yaml
new file mode 100644
index 0000000000..07ded6f97a
--- /dev/null
+++ b/charts/tikv-importer/templates/tikv-importer-service.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.clusterName }}-importer
+ labels:
+ app.kubernetes.io/name: {{ template "chart.name" . }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: importer
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+spec:
+ clusterIP: None
+ ports:
+ - name: importer
+ port: 8287
+ selector:
+ app.kubernetes.io/name: {{ template "chart.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: importer
diff --git a/charts/tikv-importer/templates/tikv-importer-statefulset.yaml b/charts/tikv-importer/templates/tikv-importer-statefulset.yaml
new file mode 100644
index 0000000000..18a6653ec3
--- /dev/null
+++ b/charts/tikv-importer/templates/tikv-importer-statefulset.yaml
@@ -0,0 +1,89 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ .Values.clusterName }}-importer
+ labels:
+ app.kubernetes.io/name: {{ template "chart.name" . }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: importer
+ helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+spec:
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ template "chart.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: importer
+ serviceName: {{ .Values.clusterName }}-importer
+ replicas: 1
+ template:
+ metadata:
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/path: "/metrics"
+ prometheus.io/port: "9091"
+ labels:
+ app.kubernetes.io/name: {{ template "chart.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: importer
+ spec:
+ {{- if .Values.affinity }}
+ affinity:
+{{ toYaml .Values.affinity | indent 6 }}
+ {{- end }}
+ {{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 6 }}
+ {{- end }}
+ containers:
+ - name: importer
+ image: {{ .Values.image }}
+ imagePullPolicy: {{ .Values.imagePullPolicy | default "IfNotPresent"}}
+ command:
+ - /tikv-importer
+ # tikv-importer does not support domain name: https://github.com/tikv/importer/issues/16
+ # - --addr=${MY_POD_NAME}.tikv-importer:8287
+ - --addr=$(MY_POD_IP):8287
+ - --config=/etc/tikv-importer/tikv-importer.toml
+ - --import-dir=/var/lib/tikv-importer
+ env:
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: TZ
+ value: {{ .Values.timezone | default "UTC" }}
+ volumeMounts:
+ - name: data
+ mountPath: /var/lib/tikv-importer
+ - name: config
+ mountPath: /etc/tikv-importer
+ {{- if .Values.resources }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+ {{- end }}
+ - name: pushgateway
+ image: {{ .Values.pushgatewayImage }}
+ imagePullPolicy: {{ .Values.pushgatewayImagePullPolicy | default "IfNotPresent" }}
+ volumes:
+ - name: config
+ configMap:
+ name: {{ .Values.clusterName }}-importer-{{ template "importer-configmap.data-digest" . }}
+ items:
+ - key: config-file
+ path: tikv-importer.toml
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ storageClassName: {{ .Values.storageClassName }}
+ resources:
+ requests:
+ storage: {{ .Values.storage }}
diff --git a/charts/tikv-importer/values.yaml b/charts/tikv-importer/values.yaml
new file mode 100644
index 0000000000..b0ac35eb5e
--- /dev/null
+++ b/charts/tikv-importer/values.yaml
@@ -0,0 +1,32 @@
+# Default values for tikv-importer.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# timezone is the default system timezone for TiDB
+timezone: UTC
+
+# clusterName is the TiDB cluster name; if not specified, the chart release name will be used
+clusterName: demo
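+# For example, a sketch of installing this chart with Helm 2 (release name and namespace are
+# only illustrative):
+#   helm install ./charts/tikv-importer --name=demo-importer --namespace=tidb --set clusterName=demo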
+
+image: pingcap/tidb-lightning:v3.0.8
+imagePullPolicy: IfNotPresent
+storageClassName: local-storage
+storage: 20Gi
+resources:
+ {}
+ # limits:
+ # cpu: 16000m
+ # memory: 8Gi
+ # requests:
+ # cpu: 16000m
+ # memory: 8Gi
+affinity: {}
+tolerations: []
+pushgatewayImage: prom/pushgateway:v0.3.1
+pushgatewayImagePullPolicy: IfNotPresent
+config: |
+ log-level = "info"
+ [metric]
+ job = "tikv-importer"
+ interval = "15s"
+ address = "localhost:9091"
diff --git a/ci/aws-clean-eks.sh b/ci/aws-clean-eks.sh
new file mode 100755
index 0000000000..fc9c94eb81
--- /dev/null
+++ b/ci/aws-clean-eks.sh
@@ -0,0 +1,150 @@
+#!/bin/bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# aws-k8s-tester cannot clean up all the resources it created when an error happens.
+# This script is used to clean up resources created by aws-k8s-tester in our CI.
+#
+# DO NOT USE THIS SCRIPT FOR OTHER USES!
+#
+
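+# Usage (a sketch; assumes the aws CLI and jq are installed and credentials are configured):
+#   ./ci/aws-clean-eks.sh <cluster-name> [<cluster-name> ...]
+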
+function get_stacks() {
+ aws cloudformation list-stacks --stack-status-filter CREATE_COMPLETE DELETE_FAILED --query 'StackSummaries[*].StackName' --output text
+}
+
+function delete_security_group() {
+ local sgId="$1"
+ echo "info: deleting security group '$sgId'"
+ for eni in $(aws ec2 describe-network-interfaces --filters "Name=group-id,Values=$sgId" --query 'NetworkInterfaces[*].NetworkInterfaceId' --output text); do
+        echo "info: deleting leaked network interface '$eni'"
+ aws ec2 delete-network-interface --network-interface-id "$eni"
+ done
+ aws ec2 delete-security-group --group-id "$sgId"
+ if [ $? -eq 0 ]; then
+        echo "info: successfully deleted security group '$sgId'"
+    else
+        echo "error: failed to delete security group '$sgId'"
+ fi
+}
+
+function delete_vpc() {
+ local vpcId="$1"
+ echo "info: deleting vpc '$vpcId'"
+ for sgId in $(aws ec2 describe-security-groups --filters "Name=vpc-id,Values=$vpcId" --query "SecurityGroups[?GroupName != 'default'].GroupId" --output text); do
+ delete_security_group "$sgId"
+ done
+ aws ec2 delete-vpc --vpc-id "$vpcId"
+ if [ $? -eq 0 ]; then
+        echo "info: successfully deleted vpc '$vpcId'"
+    else
+        echo "error: failed to delete vpc '$vpcId'"
+ fi
+}
+
+function fix_eks_mng_deletion_issues() {
+ local cluster="$1"
+ local mng="$2"
+ while IFS=$'\n' read -r line; do
+ read -r code resourceIds <<< $line
+ if [ "$code" == "Ec2SecurityGroupDeletionFailure" ]; then
+ IFS=',' read -ra sgIds <<< "$resourceIds"
+ for sgId in ${sgIds[@]}; do
+ delete_security_group "$sgId"
+ done
+ fi
+ done <<< $(aws eks describe-nodegroup --cluster-name "$cluster" --nodegroup-name "$mng" --query 'nodegroup.health.issues' --output json | jq -r '.[].resourceIds |= join(",") | .[] | "\(.code)\t\(.resourceIds)"')
+}
+
+function clean_eks() {
+ local CLUSTER="$1"
+ echo "info: searching mng stack"
+ local regex='^'$CLUSTER'-mng-[0-9]+$'
+ local mngStack=
+ for stackName in $(get_stacks); do
+ if [[ ! "$stackName" =~ $regex ]]; then
+ continue
+ fi
+ mngStack=$stackName
+ break
+ done
+ if [ -n "$mngStack" ]; then
+ echo "info: mng stack found '$mngStack'"
+ else
+ echo "info: mng stack not found"
+ fi
+
+ echo "info: deleting mng/cluster/cluster-role/mng-role/vpc stacks"
+ local stacks=(
+ $mngStack
+ $CLUSTER-cluster
+ $CLUSTER-role-cluster
+ $CLUSTER-role-mng
+ $CLUSTER-vpc
+ )
+ for stack in ${stacks[@]}; do
+ echo "info: deleting stack $stack"
+ aws cloudformation delete-stack --stack-name $stack
+ aws cloudformation wait stack-delete-complete --stack-name $stack
+ if [ $? -ne 0 ]; then
+ echo "error: failed to delete stack '$stack'"
+ if [ "$stack" == "$mngStack" ]; then
+ echo "info: try to fix mng stack '$stack'"
+ for mngName in $(aws eks list-nodegroups --cluster-name "$CLUSTER" --query 'nodegroups[*]' --output text); do
+ fix_eks_mng_deletion_issues "$CLUSTER" $mngName
+ done
+ elif [ "$stack" == "$CLUSTER-vpc" ]; then
+ echo "info: try to fix vpc stack '$stack'"
+ while IFS=$'\n' read -r sgId; do
+ delete_security_group "$sgId"
+ done <<< $(aws cloudformation describe-stacks --stack-name "$stack" --query 'Stacks[*].Outputs[*]' --output json | jq -r '.[] | .[] | select(.OutputKey == "ControlPlaneSecurityGroupID") | .OutputValue')
+ while IFS=$'\n' read -r vpcId; do
+ delete_vpc "$vpcId"
+ done <<< $(aws cloudformation describe-stacks --stack-name "$stack" --query 'Stacks[*].Outputs[*]' --output json | jq -r '.[] | .[] | select(.OutputKey == "VPCID") | .OutputValue')
+ else
+ echo "fatal: unable to delete stack $stack"
+ exit 1
+ fi
+ echo "info: try to delete the stack '$stack' again"
+ aws cloudformation delete-stack --stack-name $stack
+ aws cloudformation wait stack-delete-complete --stack-name $stack
+ if [ $? -ne 0 ]; then
+ echo "fatal: unable to delete stack $stack"
+ exit 1
+ fi
+ fi
+ done
+}
+
+# https://github.com/aws/aws-cli#other-configurable-variables
+if [ -n "${AWS_REGION}" ]; then
+ export AWS_DEFAULT_REGION=${AWS_REGION:-}
+fi
+
+aws sts get-caller-identity
+if [ $? -ne 0 ]; then
+ echo "error: failed to get caller identity"
+ exit 1
+fi
+
+for CLUSTER in $@; do
+ echo "info: start to clean eks test cluster '$CLUSTER'"
+ clean_eks "$CLUSTER"
+ if [ $? -eq 0 ]; then
+        echo "info: successfully cleaned the eks test cluster '$CLUSTER'"
+ else
+ echo "fatal: failed to clean the eks test cluster '$CLUSTER'"
+ exit 1
+ fi
+done
diff --git a/ci/deploy_tidb_operator_staging.groovy b/ci/deploy_tidb_operator_staging.groovy
index cfb5a86e22..17b8b8b3cd 100644
--- a/ci/deploy_tidb_operator_staging.groovy
+++ b/ci/deploy_tidb_operator_staging.groovy
@@ -23,12 +23,18 @@ scheduler:
replicas: 2
admissionWebhook:
create: true
- hooksEnabled:
+ replicas: 2
+ validation:
statefulSets: true
pods: true
- # TODO: enable validating and defaulting after we ease the constrain
- validating: false
- defaulting: false
+ pingcapResources: false
+ mutation:
+ pingcapResources: true
+ failurePolicy:
+ validation: Fail
+ mutation: Fail
+features:
+ - AutoScaling=true
'''
def call(BUILD_BRANCH) {
diff --git a/ci/e2e_eks.groovy b/ci/e2e_eks.groovy
new file mode 100644
index 0000000000..1966b95d54
--- /dev/null
+++ b/ci/e2e_eks.groovy
@@ -0,0 +1,159 @@
+//
+// Jenkins pipeline for EKS e2e job.
+//
+// This script is written in declarative syntax. Refer to
+// https://jenkins.io/doc/book/pipeline/syntax/ for more details.
+//
+// Note that the parameters of the job are configured in this script.
+//
+
+import groovy.transform.Field
+
+@Field
+def podYAML = '''
+apiVersion: v1
+kind: Pod
+spec:
+ containers:
+ - name: main
+ image: gcr.io/k8s-testimages/kubekins-e2e:v20200311-1e25827-master
+ command:
+ - runner.sh
+ - sleep
+ - 1d
+ # we need privileged mode in order to do docker in docker
+ securityContext:
+ privileged: true
+ env:
+ - name: DOCKER_IN_DOCKER_ENABLED
+ value: "true"
+ resources:
+ requests:
+ memory: "4000Mi"
+ cpu: 2000m
+ volumeMounts:
+    # dind expects /var/lib/docker to be a volume
+ - name: docker-root
+ mountPath: /var/lib/docker
+ volumes:
+ - name: docker-root
+ emptyDir: {}
+'''
+
+// Able to override default values in Jenkins job via environment variables.
+if (!env.DEFAULT_GIT_REF) {
+ env.DEFAULT_GIT_REF = "master"
+}
+
+if (!env.DEFAULT_GINKGO_NODES) {
+ env.DEFAULT_GINKGO_NODES = "8"
+}
+
+if (!env.DEFAULT_E2E_ARGS) {
+ env.DEFAULT_E2E_ARGS = "--ginkgo.skip='\\[Serial\\]|\\[Stability\\]' --ginkgo.focus='\\[tidb-operator\\]'"
+}
+
+if (!env.DEFAULT_CLUSTER) {
+ env.DEFAULT_CLUSTER = "jenkins-tidb-operator-e2e"
+}
+
+if (!env.DEFAULT_AWS_REGION) {
+ env.DEFAULT_AWS_REGION = "us-west-2"
+}
+
+pipeline {
+ agent {
+ kubernetes {
+ yaml podYAML
+ defaultContainer "main"
+ customWorkspace "/home/jenkins/agent/workspace/go/src/github.com/pingcap/tidb-operator"
+ }
+ }
+
+ options {
+ timeout(time: 3, unit: 'HOURS')
+ }
+
+ parameters {
+ string(name: 'GIT_URL', defaultValue: 'git@github.com:pingcap/tidb-operator.git', description: 'git repo url')
+ string(name: 'GIT_REF', defaultValue: env.DEFAULT_GIT_REF, description: 'git ref spec to checkout, e.g. master, release-1.1')
+ string(name: 'PR_ID', defaultValue: '', description: 'pull request ID, this will override GIT_REF if set, e.g. 1889')
+ string(name: 'GINKGO_NODES', defaultValue: env.DEFAULT_GINKGO_NODES, description: 'the number of ginkgo nodes')
+ string(name: 'E2E_ARGS', defaultValue: env.DEFAULT_E2E_ARGS, description: "e2e args, e.g. --ginkgo.focus='\\[Stability\\]'")
+ string(name: 'CLUSTER', defaultValue: env.DEFAULT_CLUSTER, description: 'the name of the cluster')
+ string(name: 'AWS_REGION', defaultValue: env.DEFAULT_AWS_REGION, description: 'the AWS region')
+ }
+
+ environment {
+ GIT_REF = ''
+ ARTIFACTS = "${env.WORKSPACE}/artifacts"
+ }
+
+ stages {
+ stage("Prepare") {
+ steps {
+ // The declarative model for Jenkins Pipelines has a restricted
+                // subset of syntax that it allows in the stage blocks. We use a
+                // script step to bypass the restriction.
+ // https://jenkins.io/doc/book/pipeline/syntax/#script
+ script {
+ GIT_REF = params.GIT_REF
+ if (params.PR_ID != "") {
+ GIT_REF = "refs/remotes/origin/pr/${params.PR_ID}/head"
+ }
+ }
+ echo "env.NODE_NAME: ${env.NODE_NAME}"
+ echo "env.WORKSPACE: ${env.WORKSPACE}"
+ echo "GIT_REF: ${GIT_REF}"
+ echo "ARTIFACTS: ${ARTIFACTS}"
+ }
+ }
+
+ stage("Checkout") {
+ steps {
+ checkout scm: [
+ $class: 'GitSCM',
+ branches: [[name: GIT_REF]],
+ userRemoteConfigs: [[
+ credentialsId: 'github-sre-bot-ssh',
+ refspec: '+refs/heads/*:refs/remotes/origin/* +refs/pull/*:refs/remotes/origin/pr/*',
+ url: "${params.GIT_URL}",
+ ]]
+ ]
+ }
+ }
+
+ stage("Run") {
+ steps {
+ withCredentials([
+ string(credentialsId: 'TIDB_OPERATOR_AWS_ACCESS_KEY_ID', variable: 'AWS_ACCESS_KEY_ID'),
+ string(credentialsId: 'TIDB_OPERATOR_AWS_SECRET_ACCESS_KEY', variable: 'AWS_SECRET_ACCESS_KEY'),
+ ]) {
+ sh """
+ #!/bin/bash
+ export PROVIDER=eks
+ export CLUSTER=${params.CLUSTER}
+ export AWS_REGION=${params.AWS_REGION}
+ export GINKGO_NODES=${params.GINKGO_NODES}
+ export REPORT_DIR=${ARTIFACTS}
+ echo "info: try to clean the cluster created previously"
+ ./ci/aws-clean-eks.sh \$CLUSTER
+ echo "info: begin to run e2e"
+ ./hack/e2e.sh -- ${params.E2E_ARGS}
+ """
+ }
+ }
+ }
+ }
+
+ post {
+ always {
+ dir(ARTIFACTS) {
+ archiveArtifacts artifacts: "**", allowEmptyArchive: true
+ junit testResults: "*.xml", allowEmptyResults: true
+ }
+ }
+ }
+}
+
+// vim: et sw=4 ts=4
diff --git a/ci/e2e_gke.groovy b/ci/e2e_gke.groovy
new file mode 100644
index 0000000000..e85f5809ce
--- /dev/null
+++ b/ci/e2e_gke.groovy
@@ -0,0 +1,166 @@
+//
+// Jenkins pipeline for GKE e2e job.
+//
+// This script is written in declarative syntax. Refer to
+// https://jenkins.io/doc/book/pipeline/syntax/ for more details.
+//
+// Note that the parameters of the job are configured in this script.
+//
+
+import groovy.transform.Field
+
+@Field
+def podYAML = '''
+apiVersion: v1
+kind: Pod
+spec:
+ containers:
+ - name: main
+ image: gcr.io/k8s-testimages/kubekins-e2e:v20200311-1e25827-master
+ command:
+ - runner.sh
+ - sleep
+ - 1d
+ # we need privileged mode in order to do docker in docker
+ securityContext:
+ privileged: true
+ env:
+ - name: DOCKER_IN_DOCKER_ENABLED
+ value: "true"
+ resources:
+ requests:
+ memory: "4000Mi"
+ cpu: 2000m
+ volumeMounts:
+ # dind expects /var/lib/docker to be a volume
+ - name: docker-root
+ mountPath: /var/lib/docker
+ volumes:
+ - name: docker-root
+ emptyDir: {}
+'''
+
+// Default values below can be overridden in the Jenkins job via environment variables.
+if (!env.DEFAULT_GIT_REF) {
+ env.DEFAULT_GIT_REF = "master"
+}
+
+if (!env.DEFAULT_GINKGO_NODES) {
+ env.DEFAULT_GINKGO_NODES = "8"
+}
+
+if (!env.DEFAULT_E2E_ARGS) {
+ env.DEFAULT_E2E_ARGS = "--ginkgo.skip='\\[Serial\\]|\\[Stability\\]' --ginkgo.focus='\\[tidb-operator\\]'"
+}
+
+if (!env.DEFAULT_CLUSTER) {
+ env.DEFAULT_CLUSTER = "jenkins-tidb-operator-e2e"
+}
+
+if (!env.DEFAULT_GCP_PROJECT) {
+ env.DEFAULT_GCP_PROJECT = ""
+}
+
+if (!env.DEFAULT_GCP_ZONE) {
+ env.DEFAULT_GCP_ZONE = "us-central1-b"
+}
+
+pipeline {
+ agent {
+ kubernetes {
+ yaml podYAML
+ defaultContainer "main"
+ customWorkspace "/home/jenkins/agent/workspace/go/src/github.com/pingcap/tidb-operator"
+ }
+ }
+
+ options {
+ timeout(time: 3, unit: 'HOURS')
+ }
+
+ parameters {
+ string(name: 'GIT_URL', defaultValue: 'git@github.com:pingcap/tidb-operator.git', description: 'git repo url')
+ string(name: 'GIT_REF', defaultValue: env.DEFAULT_GIT_REF, description: 'git ref spec to checkout, e.g. master, release-1.1')
+ string(name: 'PR_ID', defaultValue: '', description: 'pull request ID, this will override GIT_REF if set, e.g. 1889')
+ string(name: 'GINKGO_NODES', defaultValue: env.DEFAULT_GINKGO_NODES, description: 'the number of ginkgo nodes')
+ string(name: 'E2E_ARGS', defaultValue: env.DEFAULT_E2E_ARGS, description: "e2e args, e.g. --ginkgo.focus='\\[Stability\\]'")
+ string(name: 'CLUSTER', defaultValue: env.DEFAULT_CLUSTER, description: 'the name of the cluster')
+ string(name: 'GCP_PROJECT', defaultValue: env.DEFAULT_GCP_PROJECT, description: 'the GCP project ID')
+ string(name: 'GCP_ZONE', defaultValue: env.DEFAULT_GCP_ZONE, description: 'the GCP zone')
+ }
+
+ environment {
+ GIT_REF = ''
+ ARTIFACTS = "${env.WORKSPACE}/artifacts"
+ }
+
+ stages {
+ stage("Prepare") {
+ steps {
+ // The declarative model for Jenkins Pipelines has a restricted
+ // subset of syntax that it allows in the stage blocks. We use
+ // a script step to bypass the restriction.
+ // https://jenkins.io/doc/book/pipeline/syntax/#script
+ script {
+ GIT_REF = params.GIT_REF
+ if (params.PR_ID != "") {
+ GIT_REF = "refs/remotes/origin/pr/${params.PR_ID}/head"
+ }
+ }
+ echo "env.NODE_NAME: ${env.NODE_NAME}"
+ echo "env.WORKSPACE: ${env.WORKSPACE}"
+ echo "GIT_REF: ${GIT_REF}"
+ echo "ARTIFACTS: ${ARTIFACTS}"
+ }
+ }
+
+ stage("Checkout") {
+ steps {
+ checkout scm: [
+ $class: 'GitSCM',
+ branches: [[name: GIT_REF]],
+ userRemoteConfigs: [[
+ credentialsId: 'github-sre-bot-ssh',
+ refspec: '+refs/heads/*:refs/remotes/origin/* +refs/pull/*:refs/remotes/origin/pr/*',
+ url: "${params.GIT_URL}",
+ ]]
+ ]
+ }
+ }
+
+ stage("Run") {
+ steps {
+ withCredentials([
+ file(credentialsId: 'TIDB_OPERATOR_GCP_CREDENTIALS', variable: 'GCP_CREDENTIALS'),
+ file(credentialsId: 'TIDB_OPERATOR_GCP_SSH_PRIVATE_KEY', variable: 'GCP_SSH_PRIVATE_KEY'),
+ file(credentialsId: 'TIDB_OPERATOR_GCP_SSH_PUBLIC_KEY', variable: 'GCP_SSH_PUBLIC_KEY'),
+ ]) {
+ sh """
+ #!/bin/bash
+ export PROVIDER=gke
+ export CLUSTER=${params.CLUSTER}
+ export GCP_ZONE=${params.GCP_ZONE}
+ export GCP_PROJECT=${params.GCP_PROJECT}
+ export GINKGO_NODES=${params.GINKGO_NODES}
+ export REPORT_DIR=${ARTIFACTS}
+ echo "info: try to clean the cluster created previously"
+ SKIP_BUILD=y SKIP_IMAGE_BUILD=y SKIP_UP=y SKIP_TEST=y ./hack/e2e.sh
+ echo "info: begin to run e2e"
+ ./hack/e2e.sh -- ${params.E2E_ARGS}
+ """
+ }
+ }
+ }
+ }
+
+ post {
+ always {
+ dir(ARTIFACTS) {
+ archiveArtifacts artifacts: "**", allowEmptyArchive: true
+ junit testResults: "*.xml", allowEmptyResults: true
+ }
+ }
+ }
+}
+
+// vim: et sw=4 ts=4
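One detail of the GKE Run stage is worth spelling out: the first hack/e2e.sh call sets SKIP_BUILD, SKIP_IMAGE_BUILD, SKIP_UP and SKIP_TEST, so it appears to perform only the teardown phase, which is how a cluster left over from a previous run gets cleaned up before a fresh one is created. A minimal sketch of the same pattern (the GCP project ID is a placeholder):

    export PROVIDER=gke
    export CLUSTER=jenkins-tidb-operator-e2e
    export GCP_PROJECT=my-project
    export GCP_ZONE=us-central1-b
    # cleanup-only pass: every phase except teardown is skipped
    SKIP_BUILD=y SKIP_IMAGE_BUILD=y SKIP_UP=y SKIP_TEST=y ./hack/e2e.sh
    # a normal pass then brings the cluster up and runs the suite
    ./hack/e2e.sh -- --ginkgo.focus='\[tidb-operator\]'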
diff --git a/ci/e2e_kind.groovy b/ci/e2e_kind.groovy
new file mode 100644
index 0000000000..245b75a3b8
--- /dev/null
+++ b/ci/e2e_kind.groovy
@@ -0,0 +1,220 @@
+//
+// Jenkins pipeline for Kind e2e job.
+//
+// This script is written in declarative syntax. Refer to
+// https://jenkins.io/doc/book/pipeline/syntax/ for more details.
+//
+// Note that the parameters of the job are configured in this script.
+//
+
+import groovy.transform.Field
+
+@Field
+def podYAML = '''
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app: tidb-operator-e2e
+spec:
+ containers:
+ - name: main
+ image: gcr.io/k8s-testimages/kubekins-e2e:v20200311-1e25827-master
+ command:
+ - runner.sh
+ # Clean containers on TERM signal in root process to avoid cgroup leaking.
+ # https://github.com/pingcap/tidb-operator/issues/1603#issuecomment-582402196
+ - exec
+ - bash
+ - -c
+ - |
+ function clean() {
+ echo "info: clean all containers to avoid cgroup leaking"
+ docker kill $(docker ps -q) || true
+ docker system prune -af || true
+ }
+ trap clean TERM
+ sleep 1d & wait
+ # we need privileged mode in order to do docker in docker
+ securityContext:
+ privileged: true
+ env:
+ - name: DOCKER_IN_DOCKER_ENABLED
+ value: "true"
+ resources:
+ requests:
+ memory: "8000Mi"
+ cpu: 8000m
+ ephemeral-storage: "50Gi"
+ limits:
+ memory: "8000Mi"
+ cpu: 8000m
+ ephemeral-storage: "50Gi"
+ # kind needs /lib/modules and cgroups from the host
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: modules
+ readOnly: true
+ - mountPath: /sys/fs/cgroup
+ name: cgroup
+ # dind expects /var/lib/docker to be a volume
+ - name: docker-root
+ mountPath: /var/lib/docker
+ # legacy docker path for gcr.io/k8s-testimages/kubekins-e2e
+ - name: docker-graph
+ mountPath: /docker-graph
+ volumes:
+ - name: modules
+ hostPath:
+ path: /lib/modules
+ type: Directory
+ - name: cgroup
+ hostPath:
+ path: /sys/fs/cgroup
+ type: Directory
+ - name: docker-root
+ emptyDir: {}
+ - name: docker-graph
+ emptyDir: {}
+ tolerations:
+ - effect: NoSchedule
+ key: tidb-operator
+ operator: Exists
+ affinity:
+ # running on nodes for tidb-operator only
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: ci.pingcap.com
+ operator: In
+ values:
+ - tidb-operator
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - tidb-operator-e2e
+ topologyKey: kubernetes.io/hostname
+'''
+
+// Default values below can be overridden in the Jenkins job via environment variables.
+if (!env.DEFAULT_GIT_REF) {
+ env.DEFAULT_GIT_REF = "master"
+}
+
+if (!env.DEFAULT_GINKGO_NODES) {
+ env.DEFAULT_GINKGO_NODES = "8"
+}
+
+if (!env.DEFAULT_E2E_ARGS) {
+ env.DEFAULT_E2E_ARGS = ""
+}
+
+if (!env.DEFAULT_DOCKER_IO_MIRROR) {
+ env.DEFAULT_DOCKER_IO_MIRROR = ""
+}
+
+if (!env.DEFAULT_QUAY_IO_MIRROR) {
+ env.DEFAULT_QUAY_IO_MIRROR = ""
+}
+
+if (!env.DEFAULT_GCR_IO_MIRROR) {
+ env.DEFAULT_GCR_IO_MIRROR = ""
+}
+
+pipeline {
+ agent {
+ kubernetes {
+ yaml podYAML
+ defaultContainer "main"
+ customWorkspace "/home/jenkins/agent/workspace/go/src/github.com/pingcap/tidb-operator"
+ }
+ }
+
+ options {
+ timeout(time: 3, unit: 'HOURS')
+ }
+
+ parameters {
+ string(name: 'GIT_URL', defaultValue: 'git@github.com:pingcap/tidb-operator.git', description: 'git repo url')
+ string(name: 'GIT_REF', defaultValue: env.DEFAULT_GIT_REF, description: 'git ref spec to checkout, e.g. master, release-1.1')
+ string(name: 'PR_ID', defaultValue: '', description: 'pull request ID, this will override GIT_REF if set, e.g. 1889')
+ string(name: 'GINKGO_NODES', defaultValue: env.DEFAULT_GINKGO_NODES, description: 'the number of ginkgo nodes')
+ string(name: 'E2E_ARGS', defaultValue: env.DEFAULT_E2E_ARGS, description: "e2e args, e.g. --ginkgo.focus='\\[Stability\\]'")
+ string(name: 'DOCKER_IO_MIRROR', defaultValue: env.DEFAULT_DOCKER_IO_MIRROR, description: "docker mirror for docker.io")
+ string(name: 'QUAY_IO_MIRROR', defaultValue: env.DEFAULT_QUAY_IO_MIRROR, description: "mirror for quay.io")
+ string(name: 'GCR_IO_MIRROR', defaultValue: env.DEFAULT_GCR_IO_MIRROR, description: "mirror for gcr.io")
+ }
+
+ environment {
+ GIT_REF = ''
+ ARTIFACTS = "${env.WORKSPACE}/artifacts"
+ }
+
+ stages {
+ stage("Prepare") {
+ steps {
+ // The declarative model for Jenkins Pipelines has a restricted
+ // subset of syntax that it allows in the stage blocks. We use
+ // a script step to bypass the restriction.
+ // https://jenkins.io/doc/book/pipeline/syntax/#script
+ script {
+ GIT_REF = params.GIT_REF
+ if (params.PR_ID != "") {
+ GIT_REF = "refs/remotes/origin/pr/${params.PR_ID}/head"
+ }
+ }
+ echo "env.NODE_NAME: ${env.NODE_NAME}"
+ echo "env.WORKSPACE: ${env.WORKSPACE}"
+ echo "GIT_REF: ${GIT_REF}"
+ echo "ARTIFACTS: ${ARTIFACTS}"
+ }
+ }
+
+ stage("Checkout") {
+ steps {
+ checkout scm: [
+ $class: 'GitSCM',
+ branches: [[name: GIT_REF]],
+ userRemoteConfigs: [[
+ credentialsId: 'github-sre-bot-ssh',
+ refspec: '+refs/heads/*:refs/remotes/origin/* +refs/pull/*:refs/remotes/origin/pr/*',
+ url: "${params.GIT_URL}",
+ ]]
+ ]
+ }
+ }
+
+ stage("Run") {
+ steps {
+ sh """
+ #!/bin/bash
+ export GINKGO_NODES=${params.GINKGO_NODES}
+ export REPORT_DIR=${ARTIFACTS}
+ export DOCKER_IO_MIRROR=${params.DOCKER_IO_MIRROR}
+ export QUAY_IO_MIRROR=${params.QUAY_IO_MIRROR}
+ export GCR_IO_MIRROR=${params.GCR_IO_MIRROR}
+ echo "info: begin to run e2e"
+ ./hack/e2e.sh -- ${params.E2E_ARGS}
+ """
+ }
+ }
+ }
+
+ post {
+ always {
+ dir(ARTIFACTS) {
+ archiveArtifacts artifacts: "**", allowEmptyArchive: true
+ junit testResults: "*.xml", allowEmptyResults: true
+ }
+ }
+ }
+}
+
+// vim: et sw=4 ts=4
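Unlike the cloud jobs, the kind pipeline needs no cloud credentials; the only knobs it forwards are the Ginkgo parallelism, the report directory and optional registry mirrors. A hedged sketch of an equivalent local run (the mirror URLs are placeholders; leave them empty to pull straight from the upstream registries):

    export GINKGO_NODES=8
    export REPORT_DIR=$(pwd)/artifacts
    export DOCKER_IO_MIRROR=http://mirror.example.com:5000
    export QUAY_IO_MIRROR=http://mirror.example.com:5001
    export GCR_IO_MIRROR=http://mirror.example.com:5002
    ./hack/e2e.sh -- --ginkgo.focus='\[Stability\]'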
diff --git a/ci/pingcap_tidb_operator_build_kind.groovy b/ci/pingcap_tidb_operator_build_kind.groovy
index ccc176c26f..6ff759fa60 100644
--- a/ci/pingcap_tidb_operator_build_kind.groovy
+++ b/ci/pingcap_tidb_operator_build_kind.groovy
@@ -14,7 +14,7 @@ metadata:
spec:
containers:
- name: main
- image: gcr.io/k8s-testimages/kubekins-e2e:v20191108-9467d02-master
+ image: gcr.io/k8s-testimages/kubekins-e2e:v20200311-1e25827-master
command:
- runner.sh
# Clean containers on TERM signal in root process to avoid cgroup leaking.
@@ -191,16 +191,17 @@ def call(BUILD_BRANCH, CREDENTIALS_ID, CODECOV_CREDENTIALS_ID) {
}
}
- stage("Check") {
- ansiColor('xterm') {
- sh """
- export GOPATH=${WORKSPACE}/go
- export PATH=${WORKSPACE}/go/bin:\$PATH
- make check-setup
- make check
- """
- }
- }
+ // moved to Github Actions
+ // stage("Check") {
+ // ansiColor('xterm') {
+ // sh """
+ // export GOPATH=${WORKSPACE}/go
+ // export PATH=${WORKSPACE}/go/bin:\$PATH
+ // make check-setup
+ // make check
+ // """
+ // }
+ // }
stage("Build and Test") {
ansiColor('xterm') {
@@ -209,10 +210,10 @@ def call(BUILD_BRANCH, CREDENTIALS_ID, CODECOV_CREDENTIALS_ID) {
make e2e-build
if [ ${BUILD_BRANCH} == "master" ]
then
- make test GO_COVER=y
+ make test GOFLAGS='-race' GO_COVER=y
curl -s https://codecov.io/bash | bash -s - -t ${CODECOV_TOKEN} || echo 'Codecov did not collect coverage reports'
else
- make test
+ make test GOFLAGS='-race'
fi
"""
}
@@ -237,40 +238,40 @@ def call(BUILD_BRANCH, CREDENTIALS_ID, CODECOV_CREDENTIALS_ID) {
def MIRRORS = "DOCKER_IO_MIRROR=http://172.16.4.143:5000 QUAY_IO_MIRROR=http://172.16.4.143:5001"
def builds = [:]
builds["E2E v1.12.10"] = {
- build("${MIRRORS} IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=8 KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_ ./hack/e2e.sh -- --preload-images --ginkgo.skip='\\[Serial\\]'", artifacts)
+ build("${MIRRORS} RUNNER_SUITE_NAME=e2e-v1.12 IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=6 KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_ ./hack/e2e.sh -- --preload-images --operator-killer", artifacts)
}
builds["E2E v1.12.10 AdvancedStatefulSet"] = {
- build("${MIRRORS} IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=8 KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_advanced_statefulset ./hack/e2e.sh -- --preload-images --ginkgo.skip='\\[Serial\\]' --operator-features AdvancedStatefulSet=true", artifacts)
+ build("${MIRRORS} RUNNER_SUITE_NAME=e2e-v1.12-advanced-statefulset IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=6 KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_advanced_statefulset ./hack/e2e.sh -- --preload-images --operator-features AdvancedStatefulSet=true", artifacts)
}
- builds["E2E v1.17.0"] = {
- build("${MIRRORS} IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=8 KUBE_VERSION=v1.17.0 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.17.0_ ./hack/e2e.sh -- -preload-images --ginkgo.skip='\\[Serial\\]'", artifacts)
+ builds["E2E v1.18.0"] = {
+ build("${MIRRORS} RUNNER_SUITE_NAME=e2e-v1.18 IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=6 KUBE_VERSION=v1.18.0 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.18.0_ ./hack/e2e.sh -- -preload-images --operator-killer", artifacts)
}
builds["E2E v1.12.10 Serial"] = {
- build("${MIRRORS} IMAGE_TAG=${GITHASH} SKIP_BUILD=y KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_serial_ ./hack/e2e.sh -- --preload-images --ginkgo.focus='\\[Serial\\]' --install-operator=false", artifacts)
+ build("${MIRRORS} RUNNER_SUITE_NAME=e2e-v1.12-serial IMAGE_TAG=${GITHASH} SKIP_BUILD=y KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_serial_ ./hack/e2e.sh -- --preload-images --ginkgo.focus='\\[Serial\\]' --install-operator=false", artifacts)
}
builds.failFast = false
parallel builds
- // we requires ~/bin/config.cfg, filemgr-linux64 utilities on k8s-kind node
- // TODO make it possible to run on any node
- node('k8s-kind') {
- dir("${PROJECT_DIR}"){
- deleteDir()
- unstash 'tidb-operator'
- if ( !(BUILD_BRANCH ==~ /[a-z0-9]{40}/) ) {
- stage('upload tidb-operator, backup-manager binary and charts'){
- //upload binary and charts
- sh """
- cp ~/bin/config.cfg ./
- tar -zcvf tidb-operator.tar.gz images/tidb-operator images/backup-manager charts
- filemgr-linux64 --action mput --bucket pingcap-dev --nobar --key builds/pingcap/operator/${GITHASH}/centos7/tidb-operator.tar.gz --file tidb-operator.tar.gz
- """
- //update refs
- writeFile file: 'sha1', text: "${GITHASH}"
- sh """
- filemgr-linux64 --action mput --bucket pingcap-dev --nobar --key refs/pingcap/operator/${BUILD_BRANCH}/centos7/sha1 --file sha1
- rm -f sha1 tidb-operator.tar.gz config.cfg
- """
+ if ( !(BUILD_BRANCH ==~ /[a-z0-9]{40}/) ) {
+ node('build_go1130_memvolume') {
+ container("golang") {
+ def WORKSPACE = pwd()
+ dir("${PROJECT_DIR}") {
+ unstash 'tidb-operator'
+ stage('upload tidb-operator binaries and charts'){
+ withCredentials([
+ string(credentialsId: 'UCLOUD_PUBLIC_KEY', variable: 'UCLOUD_PUBLIC_KEY'),
+ string(credentialsId: 'UCLOUD_PRIVATE_KEY', variable: 'UCLOUD_PRIVATE_KEY'),
+ ]) {
+ sh """
+ export UCLOUD_UFILE_PROXY_HOST=mainland-hk.ufileos.com
+ export UCLOUD_UFILE_BUCKET=pingcap-dev
+ export BUILD_BRANCH=${BUILD_BRANCH}
+ export GITHASH=${GITHASH}
+ ./ci/upload-binaries-charts.sh
+ """
+ }
+ }
}
}
}
diff --git a/ci/release_tidb_operator_binary_and_image.groovy b/ci/release_tidb_operator_binary_and_image.groovy
index 8b8b117b72..0850b68fa0 100644
--- a/ci/release_tidb_operator_binary_and_image.groovy
+++ b/ci/release_tidb_operator_binary_and_image.groovy
@@ -30,8 +30,8 @@ def call(BUILD_BRANCH, RELEASE_TAG, CREDENTIALS_ID, CHART_ITEMS) {
stage('Push tidb-backup-manager Docker Image'){
withDockerServer([uri: "${env.DOCKER_HOST}"]) {
- docker.build("uhub.service.ucloud.cn/pingcap/backup-manager:${RELEASE_TAG}", "images/backup-manager").push()
- docker.build("pingcap/backup-manager:${RELEASE_TAG}", "images/backup-manager").push()
+ docker.build("uhub.service.ucloud.cn/pingcap/tidb-backup-manager:${RELEASE_TAG}", "images/tidb-backup-manager").push()
+ docker.build("pingcap/tidb-backup-manager:${RELEASE_TAG}", "images/tidb-backup-manager").push()
}
}
@@ -94,8 +94,8 @@ def call(BUILD_BRANCH, RELEASE_TAG, CREDENTIALS_ID, CHART_ITEMS) {
slackmsg = "${slackmsg}" + "\n" +
"tidb-operator Docker Image: `pingcap/tidb-operator:${RELEASE_TAG}`" + "\n" +
"tidb-operator Docker Image: `uhub.ucloud.cn/pingcap/tidb-operator:${RELEASE_TAG}`" + "\n" +
- "backup-manager Docker Image: `pingcap/backup-manager:${RELEASE_TAG}`" + "\n" +
- "backup-manager Docker Image: `uhub.ucloud.cn/pingcap/backup-manager:${RELEASE_TAG}`"
+ "tidb-backup-manager Docker Image: `pingcap/tidb-backup-manager:${RELEASE_TAG}`" + "\n" +
+ "tidb-backup-manager Docker Image: `uhub.ucloud.cn/pingcap/tidb-backup-manager:${RELEASE_TAG}`"
for(String chartItem : CHART_ITEMS.split(' ')){
diff --git a/ci/run-in-vm.sh b/ci/run-in-vm.sh
new file mode 100755
index 0000000000..1dfac69a13
--- /dev/null
+++ b/ci/run-in-vm.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This is a helper script to start a VM and run command in it.
+#
+# TODO create an isolated network
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
+cd $ROOT
+
+source "${ROOT}/hack/lib.sh"
+
+GCP_CREDENTIALS=${GCP_CREDENTIALS:-}
+GCP_PROJECT=${GCP_PROJECT:-}
+GCP_ZONE=${GCP_ZONE:-}
+GCP_SSH_PRIVATE_KEY=${GCP_SSH_PRIVATE_KEY:-}
+GCP_SSH_PUBLIC_KEY=${GCP_SSH_PUBLIC_KEY:-}
+NAME=${NAME:-tidb-operator-e2e}
+SSH_USER=${SSH_USER:-vagrant}
+GIT_URL=${GIT_URL:-https://github.com/pingcap/tidb-operator}
+GIT_REF=${GIT_REF:-origin/master}
+SYNC_FILES=${SYNC_FILES:-}
+
+echo "GCP_CREDENTIALS: $GCP_CREDENTIALS"
+echo "GCP_PROJECT: $GCP_PROJECT"
+echo "GCP_ZONE: $GCP_ZONE"
+echo "GCP_SSH_PRIVATE_KEY: $GCP_SSH_PRIVATE_KEY"
+echo "GCP_SSH_PUBLIC_KEY: $GCP_SSH_PUBLIC_KEY"
+echo "NAME: $NAME"
+echo "GIT_URL: $GIT_URL"
+echo "GIT_REF: $GIT_REF"
+echo "SYNC_FILES: $SYNC_FILES"
+
+# Pre-created nested virtualization enabled image with following commands:
+#
+# gcloud compute disks create disk1 --image-project centos-cloud --image-family centos-8 --zone us-central1-b
+# gcloud compute images create centos-8-nested-vm \
+# --source-disk disk1 --source-disk-zone us-central1-b \
+# --licenses "https://compute.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
+# gcloud compute disks delete disk1
+#
+# Refer to
+# https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances
+# for more details.
+IMAGE=centos-8-nested-vm
+
+echo "info: configure gcloud"
+if [ -z "$GCP_PROJECT" ]; then
+ echo "error: GCP_PROJECT is required"
+ exit 1
+fi
+if [ -z "$GCP_CREDENTIALS" ]; then
+ echo "error: GCP_CREDENTIALS is required"
+ exit 1
+fi
+if [ -z "$GCP_ZONE" ]; then
+ echo "error: GCP_ZONE is required"
+ exit 1
+fi
+gcloud auth activate-service-account --key-file "$GCP_CREDENTIALS"
+gcloud config set core/project $GCP_PROJECT
+gcloud config set compute/zone $GCP_ZONE
+
+echo "info: preparing ssh keypairs for GCP"
+if [ ! -d ~/.ssh ]; then
+ mkdir ~/.ssh
+fi
+if [ ! -e ~/.ssh/google_compute_engine -a -n "$GCP_SSH_PRIVATE_KEY" ]; then
+ echo "Copying $GCP_SSH_PRIVATE_KEY to ~/.ssh/google_compute_engine" >&2
+ cp $GCP_SSH_PRIVATE_KEY ~/.ssh/google_compute_engine
+ chmod 0600 ~/.ssh/google_compute_engine
+fi
+if [ ! -e ~/.ssh/google_compute_engine.pub -a -n "$GCP_SSH_PUBLIC_KEY" ]; then
+ echo "Copying $GCP_SSH_PUBLIC_KEY to ~/.ssh/google_compute_engine.pub" >&2
+ cp $GCP_SSH_PUBLIC_KEY ~/.ssh/google_compute_engine.pub
+ chmod 0600 ~/.ssh/google_compute_engine.pub
+fi
+
+# Check whether a gcloud resource exists. The caller passes its own function
+# name (e.g. gcloud_compute_instances_exists); underscores are turned into
+# spaces and the trailing "exists" word is dropped to recover the gcloud
+# command ("gcloud compute instances"), which is then listed with a name filter.
+function gcloud_resource_exists() {
+ local args=($(tr -s '_' ' ' <<<"$1"))
+ unset args[$[${#args[@]}-1]]
+ local name="$2"
+ x=$(${args[@]} list --filter="name='$name'" --format='table[no-heading](name)' | wc -l)
+ [ "$x" -ge 1 ]
+}
+
+function gcloud_compute_instances_exists() {
+ gcloud_resource_exists ${FUNCNAME[0]} $@
+}
+
+function e2e::down() {
+ echo "info: tearing down"
+ if ! gcloud_compute_instances_exists $NAME; then
+ echo "info: instance '$NAME' does not exist, skipped"
+ return 0
+ fi
+ echo "info: deleting instance '$NAME'"
+ gcloud compute instances delete $NAME -q
+}
+
+function e2e::up() {
+ echo "info: setting up"
+ echo "info: creating instance '$NAME'"
+ gcloud compute instances create $NAME \
+ --machine-type n1-standard-8 \
+ --min-cpu-platform "Intel Haswell" \
+ --image $IMAGE \
+ --boot-disk-size 30GB \
+ --local-ssd interface=scsi
+}
+
+function e2e::test() {
+ echo "info: testing"
+ echo "info: waiting for the VM is ready"
+ hack::wait_for_success 60 3 "gcloud compute ssh $SSH_USER@$NAME --command 'uname -a'"
+ echo "info: syncing files $SYNC_FILES"
+ while IFS=$',' read -r line; do
+ IFS=':' read -r src dst <<< "$line"
+ if [ -z "$dst" ]; then
+ dst="$src"
+ fi
+ gcloud compute scp $src $SSH_USER@$NAME:$dst
+ done <<< "$SYNC_FILES"
+ local tmpfile=$(mktemp)
+ trap "rm -f $tmpfile" RETURN
+ cat <<EOF > $tmpfile
+sudo yum install -y git
+cd \$HOME
+sudo rm -rf tidb-operator
+git init tidb-operator
+cd tidb-operator
+git fetch --depth 1 --tags --progress ${GIT_URL} +refs/heads/*:refs/remotes/origin/* +refs/pull/*:refs/remotes/origin/pr/* +refs/heads/*:refs/*
+GIT_COMMIT=\$(git rev-parse ${GIT_REF}^{commit})
+git checkout -f \${GIT_COMMIT}
+$@
+EOF
+ cat $tmpfile
+ gcloud compute scp $tmpfile $SSH_USER@$NAME:/tmp/e2e.sh
+ gcloud compute ssh $SSH_USER@$NAME --command "bash /tmp/e2e.sh"
+}
+
+e2e::down
+trap 'e2e::down' EXIT
+e2e::up
+e2e::test "$@"
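ci/run-in-vm.sh takes the command to run inside the VM as its positional arguments and first copies each SYNC_FILES entry of the form src:dst into the VM (dst falls back to src when omitted; the vm.groovy job passes a single entry). A usage sketch mirroring how ci/vm.groovy drives it, with placeholder paths and project ID:

    export GCP_CREDENTIALS=/path/to/service-account.json
    export GCP_PROJECT=my-project
    export GCP_ZONE=us-central1-b
    export GIT_REF=origin/master
    export SYNC_FILES=/path/to/pull-secret.txt:/tmp/pull-secret.txt
    ./ci/run-in-vm.sh PULL_SECRET_FILE=/tmp/pull-secret.txt ./hack/e2e-openshift.sh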
diff --git a/ci/upload-binaries-charts.sh b/ci/upload-binaries-charts.sh
new file mode 100755
index 0000000000..e9c429365f
--- /dev/null
+++ b/ci/upload-binaries-charts.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
+cd $ROOT
+
+source "${ROOT}/hack/lib.sh"
+
+UCLOUD_PUBLIC_KEY=${UCLOUD_PUBLIC_KEY:-}
+UCLOUD_PRIVATE_KEY=${UCLOUD_PRIVATE_KEY:-}
+UCLOUD_UFILE_PROXY_HOST=${UCLOUD_UFILE_PROXY_HOST:-}
+UCLOUD_UFILE_API_HOST=${UCLOUD_UFILE_API_HOST:-api.spark.ucloud.cn}
+UCLOUD_UFILE_BUCKET=${UCLOUD_UFILE_BUCKET:-}
+GITHASH=${GITHASH:-}
+BUILD_BRANCH=${BUILD_BRANCH:-}
+
+FILEMGR_URL="http://tools.ufile.ucloud.com.cn/filemgr-linux64.tar.gz"
+
+if [ -z "$UCLOUD_PUBLIC_KEY" -o -z "$UCLOUD_PRIVATE_KEY" -o -z "$UCLOUD_UFILE_PROXY_HOST" ]; then
+ echo "error: UCLOUD_PUBLIC_KEY/UCLOUD_PUBLIC_KEY/UCLOUD_UFILE_PROXY_HOST are required"
+ exit 1
+fi
+
+if [ -z "$UCLOUD_UFILE_BUCKET" ]; then
+ echo "error: UCLOUD_UFILE_BUCKET is required"
+ exit 1
+fi
+
+if [ -z "$GITHASH" -o -z "$BUILD_BRANCH" ]; then
+ echo "error: GITHASH/BUILD_BRANCH are required"
+ exit 1
+fi
+
+function upload() {
+ local dir=$(mktemp -d)
+ trap "test -d $dir && rm -rf $dir" RETURN
+
+ echo "info: create a temporary directory: $dir"
+
+ cat <<EOF > $dir/config.cfg
+{
+ "public_key" : "${UCLOUD_PUBLIC_KEY}",
+ "private_key" : "${UCLOUD_PRIVATE_KEY}",
+ "proxy_host" : "${UCLOUD_UFILE_PROXY_HOST}",
+ "api_host" : "${UCLOUD_UFILE_API_HOST}"
+}
+EOF
+
+ echo "info: downloading filemgr from $FILEMGR_URL"
+ curl --retry 10 -L -s "$FILEMGR_URL" | tar --strip-components 2 -C $dir -xzvf - ./linux64/filemgr-linux64
+
+ echo "info: uploading charts and binaries"
+ tar -zcvf $dir/tidb-operator.tar.gz images/tidb-operator images/tidb-backup-manager charts
+ $dir/filemgr-linux64 --config $dir/config.cfg --action mput --bucket ${UCLOUD_UFILE_BUCKET} --nobar --key builds/pingcap/operator/${GITHASH}/centos7/tidb-operator.tar.gz --file $dir/tidb-operator.tar.gz
+
+ echo "info: update ref of branch '$BUILD_BRANCH'"
+ echo -n $GITHASH > $dir/sha1
+ $dir/filemgr-linux64 --config $dir/config.cfg --action mput --bucket ${UCLOUD_UFILE_BUCKET} --nobar --key refs/pingcap/operator/${BUILD_BRANCH}/centos7/sha1 --file $dir/sha1
+}
+
+# retry a few times until it succeeds; this avoids temporary network flakes
+hack::wait_for_success 120 5 "upload"
diff --git a/ci/vm.groovy b/ci/vm.groovy
new file mode 100644
index 0000000000..64acda4de4
--- /dev/null
+++ b/ci/vm.groovy
@@ -0,0 +1,144 @@
+//
+// Jenkins pipeline for VM jobs.
+//
+// This script is written in declarative syntax. Refer to
+// https://jenkins.io/doc/book/pipeline/syntax/ for more details.
+//
+// Note that the parameters of the job are configured in this script.
+//
+
+import groovy.transform.Field
+
+@Field
+def podYAML = '''
+apiVersion: v1
+kind: Pod
+spec:
+ containers:
+ - name: main
+ image: gcr.io/k8s-testimages/kubekins-e2e:v20200311-1e25827-master
+ command:
+ - runner.sh
+ - sleep
+ - 1d
+ # we need privileged mode in order to do docker in docker
+ securityContext:
+ privileged: true
+ env:
+ - name: DOCKER_IN_DOCKER_ENABLED
+ value: "true"
+ resources:
+ requests:
+ memory: "4000Mi"
+ cpu: 2000m
+ volumeMounts:
+ # dind expects /var/lib/docker to be a volume
+ - name: docker-root
+ mountPath: /var/lib/docker
+ volumes:
+ - name: docker-root
+ emptyDir: {}
+'''
+
+// Default values below can be overridden in the Jenkins job via environment variables.
+
+if (!env.DEFAULT_GIT_URL) {
+ env.DEFAULT_GIT_URL = "https://github.com/pingcap/tidb-operator"
+}
+
+if (!env.DEFAULT_GIT_REF) {
+ env.DEFAULT_GIT_REF = "master"
+}
+
+if (!env.DEFAULT_GCP_PROJECT) {
+ env.DEFAULT_GCP_PROJECT = ""
+}
+
+if (!env.DEFAULT_GCP_ZONE) {
+ env.DEFAULT_GCP_ZONE = "us-central1-b"
+}
+
+if (!env.DEFAULT_NAME) {
+ env.DEFAULT_NAME = "tidb-operator-e2e"
+}
+
+pipeline {
+ agent {
+ kubernetes {
+ yaml podYAML
+ defaultContainer "main"
+ customWorkspace "/home/jenkins/agent/workspace/go/src/github.com/pingcap/tidb-operator"
+ }
+ }
+
+ options {
+ timeout(time: 3, unit: 'HOURS')
+ }
+
+ parameters {
+ string(name: 'GIT_URL', defaultValue: env.DEFAULT_GIT_URL, description: 'git repo url')
+ string(name: 'GIT_REF', defaultValue: env.DEFAULT_GIT_REF, description: 'git ref spec to checkout, e.g. master, release-1.1')
+ string(name: 'PR_ID', defaultValue: '', description: 'pull request ID, this will override GIT_REF if set, e.g. 1889')
+ string(name: 'GCP_PROJECT', defaultValue: env.DEFAULT_GCP_PROJECT, description: 'the GCP project ID')
+ string(name: 'GCP_ZONE', defaultValue: env.DEFAULT_GCP_ZONE, description: 'the GCP zone')
+ string(name: 'NAME', defaultValue: env.DEFAULT_NAME, description: 'the name of VM instance')
+ }
+
+ environment {
+ GIT_REF = ''
+ }
+
+ stages {
+ stage("Prepare") {
+ steps {
+ // The declarative model for Jenkins Pipelines has a restricted
+ // subset of syntax that it allows in the stage blocks. We use
+ // a script step to bypass the restriction.
+ // https://jenkins.io/doc/book/pipeline/syntax/#script
+ script {
+ GIT_REF = params.GIT_REF
+ if (params.PR_ID != "") {
+ GIT_REF = "refs/remotes/origin/pr/${params.PR_ID}/head"
+ }
+ }
+ echo "env.NODE_NAME: ${env.NODE_NAME}"
+ echo "env.WORKSPACE: ${env.WORKSPACE}"
+ echo "GIT_REF: ${GIT_REF}"
+ }
+ }
+
+ stage("Checkout") {
+ steps {
+ checkout scm: [
+ $class: 'GitSCM',
+ branches: [[name: GIT_REF]],
+ userRemoteConfigs: [[
+ refspec: '+refs/heads/*:refs/remotes/origin/* +refs/pull/*:refs/remotes/origin/pr/*',
+ url: "${params.GIT_URL}",
+ ]]
+ ]
+ }
+ }
+
+ stage("Run") {
+ steps {
+ withCredentials([
+ file(credentialsId: 'TIDB_OPERATOR_GCP_CREDENTIALS', variable: 'GCP_CREDENTIALS'),
+ file(credentialsId: 'TIDB_OPERATOR_GCP_SSH_PRIVATE_KEY', variable: 'GCP_SSH_PRIVATE_KEY'),
+ file(credentialsId: 'TIDB_OPERATOR_GCP_SSH_PUBLIC_KEY', variable: 'GCP_SSH_PUBLIC_KEY'),
+ file(credentialsId: 'TIDB_OPERATOR_REDHAT_PULL_SECRET', variable: 'REDHAT_PULL_SECRET'),
+ ]) {
+ sh """
+ #!/bin/bash
+ export GIT_REF=${GIT_REF}
+ export SYNC_FILES=\$REDHAT_PULL_SECRET:/tmp/pull-secret.txt
+ # TODO make the command configurable
+ ./ci/run-in-vm.sh PULL_SECRET_FILE=/tmp/pull-secret.txt ./hack/e2e-openshift.sh
+ """
+ }
+ }
+ }
+ }
+}
+
+// vim: et sw=4 ts=4
diff --git a/cmd/admission-webhook/main.go b/cmd/admission-webhook/main.go
index 120b0f70a1..600f86c758 100644
--- a/cmd/admission-webhook/main.go
+++ b/cmd/admission-webhook/main.go
@@ -15,15 +15,17 @@ package main
import (
"flag"
+ "fmt"
"os"
"time"
"github.com/openshift/generic-admission-server/pkg/cmd"
-
"github.com/pingcap/tidb-operator/pkg/features"
"github.com/pingcap/tidb-operator/pkg/version"
"github.com/pingcap/tidb-operator/pkg/webhook"
+ "github.com/pingcap/tidb-operator/pkg/webhook/pod"
"k8s.io/component-base/logs"
+ "k8s.io/klog"
)
var (
@@ -51,9 +53,19 @@ func main() {
}
version.LogVersionInfo()
+ flag.CommandLine.VisitAll(func(flag *flag.Flag) {
+ klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+ })
+
ah := &webhook.AdmissionHook{
ExtraServiceAccounts: extraServiceAccounts,
EvictRegionLeaderTimeout: evictRegionLeaderTimeout,
}
+ ns := os.Getenv("NAMESPACE")
+ if len(ns) < 1 {
+ klog.Fatal("ENV NAMESPACE should be set.")
+ }
+ pod.AstsControllerServiceAccounts = fmt.Sprintf("system:serviceaccount:%s:advanced-statefulset-controller", ns)
+
cmd.RunAdmissionServer(ah)
}
diff --git a/cmd/backup-manager/app/backup/backup.go b/cmd/backup-manager/app/backup/backup.go
index 7d9f0729a5..8e3b3748cc 100644
--- a/cmd/backup-manager/app/backup/backup.go
+++ b/cmd/backup-manager/app/backup/backup.go
@@ -14,35 +14,46 @@
package backup
import (
+ "bufio"
"context"
"fmt"
"io"
+ "io/ioutil"
"os/exec"
+ "path"
+ "strings"
"github.com/gogo/protobuf/proto"
- glog "k8s.io/klog"
-
kvbackup "github.com/pingcap/kvproto/pkg/backup"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
- "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
+ backupUtil "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/util"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/klog"
)
// Options contains the input arguments to the backup command
type Options struct {
- Namespace string
- BackupName string
-}
-
-func (bo *Options) String() string {
- return fmt.Sprintf("%s/%s", bo.Namespace, bo.BackupName)
+ backupUtil.GenericOptions
}
func (bo *Options) backupData(backup *v1alpha1.Backup) (string, error) {
- args, path, err := constructOptions(backup)
+ clusterNamespace := backup.Spec.BR.ClusterNamespace
+ if backup.Spec.BR.ClusterNamespace == "" {
+ clusterNamespace = backup.Namespace
+ }
+ args, remotePath, err := constructOptions(backup)
if err != nil {
return "", err
}
+ args = append(args, fmt.Sprintf("--pd=%s-pd.%s:2379", backup.Spec.BR.Cluster, clusterNamespace))
+ if bo.TLSCluster {
+ args = append(args, fmt.Sprintf("--ca=%s", path.Join(util.ClusterClientTLSPath, corev1.ServiceAccountRootCAKey)))
+ args = append(args, fmt.Sprintf("--cert=%s", path.Join(util.ClusterClientTLSPath, corev1.TLSCertKey)))
+ args = append(args, fmt.Sprintf("--key=%s", path.Join(util.ClusterClientTLSPath, corev1.TLSPrivateKeyKey)))
+ }
+
var btype string
if backup.Spec.Type == "" {
btype = string(v1alpha1.BackupTypeFull)
@@ -54,19 +65,53 @@ func (bo *Options) backupData(backup *v1alpha1.Backup) (string, error) {
btype,
}
fullArgs = append(fullArgs, args...)
- glog.Infof("Running br command with args: %v", fullArgs)
- output, err := exec.Command("br", fullArgs...).CombinedOutput()
+ klog.Infof("Running br command with args: %v", fullArgs)
+ bin := "br" + backupUtil.Suffix(bo.TiKVVersion)
+ cmd := exec.Command(bin, fullArgs...)
+
+ stdOut, err := cmd.StdoutPipe()
+ if err != nil {
+ return remotePath, fmt.Errorf("cluster %s, create stdout pipe failed, err: %v", bo, err)
+ }
+ stdErr, err := cmd.StderrPipe()
+ if err != nil {
+ return remotePath, fmt.Errorf("cluster %s, create stderr pipe failed, err: %v", bo, err)
+ }
+ err = cmd.Start()
if err != nil {
- return path, fmt.Errorf("cluster %s, execute br command %v failed, output: %s, err: %v", bo, fullArgs, string(output), err)
+ return remotePath, fmt.Errorf("cluster %s, execute br command failed, args: %s, err: %v", bo, fullArgs, err)
}
- glog.Infof("Backup data for cluster %s successfully, output: %s", bo, string(output))
- return path, nil
+ var errMsg string
+ reader := bufio.NewReader(stdOut)
+ for {
+ line, err := reader.ReadString('\n')
+ if strings.Contains(line, "[ERROR]") {
+ errMsg += line
+ }
+
+ klog.Infof(strings.Replace(line, "\n", "", -1))
+ if err != nil || io.EOF == err {
+ break
+ }
+ }
+ tmpErr, _ := ioutil.ReadAll(stdErr)
+ if len(tmpErr) > 0 {
+ klog.Infof(string(tmpErr))
+ errMsg += string(tmpErr)
+ }
+ err = cmd.Wait()
+ if err != nil {
+ return remotePath, fmt.Errorf("cluster %s, wait pipe message failed, errMsg %s, err: %v", bo, errMsg, err)
+ }
+
+ klog.Infof("Backup data for cluster %s successfully", bo)
+ return remotePath, nil
}
// getCommitTs get backup position from `EndVersion` in BR backup meta
func getCommitTs(backup *v1alpha1.Backup) (uint64, error) {
var commitTs uint64
- s, err := util.NewRemoteStorage(backup)
+ s, err := backupUtil.NewRemoteStorage(backup)
if err != nil {
return commitTs, err
}
@@ -94,9 +139,9 @@ func getCommitTs(backup *v1alpha1.Backup) (uint64, error) {
// constructOptions constructs options for BR and also return the remote path
func constructOptions(backup *v1alpha1.Backup) ([]string, string, error) {
- args, path, err := util.ConstructBRGlobalOptionsForBackup(backup)
+ args, remotePath, err := backupUtil.ConstructBRGlobalOptionsForBackup(backup)
if err != nil {
- return args, path, err
+ return args, remotePath, err
}
config := backup.Spec.BR
if config.Concurrency != nil {
@@ -111,13 +156,13 @@ func constructOptions(backup *v1alpha1.Backup) ([]string, string, error) {
if config.Checksum != nil {
args = append(args, fmt.Sprintf("--checksum=%t", *config.Checksum))
}
- return args, path, nil
+ return args, remotePath, nil
}
// getBackupSize get the backup data size from remote
func getBackupSize(backup *v1alpha1.Backup) (int64, error) {
var size int64
- s, err := util.NewRemoteStorage(backup)
+ s, err := backupUtil.NewRemoteStorage(backup)
if err != nil {
return size, err
}
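For context, the flags assembled by backupData and constructOptions add up to a br invocation roughly like the one below. The cluster name, namespace, storage flag and TLS file locations are illustrative only; the storage-related flags actually come from ConstructBRGlobalOptionsForBackup (not shown in this diff), the --ca/--cert/--key flags are appended only when cluster TLS is enabled, and the binary name may carry a version suffix computed by backupUtil.Suffix(bo.TiKVVersion):

    br backup full \
        --pd=basic-pd.tidb-cluster-ns:2379 \
        --ca=<cluster-client-tls-dir>/ca.crt \
        --cert=<cluster-client-tls-dir>/tls.crt \
        --key=<cluster-client-tls-dir>/tls.key \
        --storage=s3://my-bucket/my-prefix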
diff --git a/cmd/backup-manager/app/backup/manager.go b/cmd/backup-manager/app/backup/manager.go
index 78f652126b..0e3f99ab2b 100644
--- a/cmd/backup-manager/app/backup/manager.go
+++ b/cmd/backup-manager/app/backup/manager.go
@@ -14,15 +14,20 @@
package backup
import (
+ "database/sql"
"fmt"
"time"
+ "github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
+ "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ bkconstants "github.com/pingcap/tidb-operator/pkg/backup/constants"
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- glog "k8s.io/klog"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/klog"
)
// Manager mainly used to manage backup related work
@@ -44,11 +49,29 @@ func NewManager(
}
}
+func (bm *Manager) setOptions(backup *v1alpha1.Backup) {
+ bm.Options.Host = backup.Spec.From.Host
+
+ if backup.Spec.From.Port != 0 {
+ bm.Options.Port = backup.Spec.From.Port
+ } else {
+ bm.Options.Port = bkconstants.DefaultTidbPort
+ }
+
+ if backup.Spec.From.User != "" {
+ bm.Options.User = backup.Spec.From.User
+ } else {
+ bm.Options.User = bkconstants.DefaultTidbUser
+ }
+
+ bm.Options.Password = util.GetOptionValueFromEnv(bkconstants.TidbPasswordKey, bkconstants.BackupManagerEnvVarPrefix)
+}
+
// ProcessBackup used to process the backup logic
func (bm *Manager) ProcessBackup() error {
- backup, err := bm.backupLister.Backups(bm.Namespace).Get(bm.BackupName)
+ backup, err := bm.backupLister.Backups(bm.Namespace).Get(bm.ResourceName)
if err != nil {
- glog.Errorf("can't find cluster %s backup %s CRD object, err: %v", bm, bm.BackupName, err)
+ klog.Errorf("can't find cluster %s backup %s CRD object, err: %v", bm, bm.ResourceName, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -60,10 +83,40 @@ func (bm *Manager) ProcessBackup() error {
if backup.Spec.BR == nil {
return fmt.Errorf("no br config in %s", bm)
}
- return bm.performBackup(backup.DeepCopy())
+
+ bm.setOptions(backup)
+
+ var db *sql.DB
+ var dsn string
+ err = wait.PollImmediate(constants.PollInterval, constants.CheckTimeout, func() (done bool, err error) {
+ dsn, err = bm.GetDSN(bm.TLSClient)
+ if err != nil {
+ klog.Errorf("can't get dsn of tidb cluster %s, err: %s", bm, err)
+ return false, err
+ }
+ db, err = util.OpenDB(dsn)
+ if err != nil {
+ klog.Warningf("can't connect to tidb cluster %s, err: %s", bm, err)
+ return false, nil
+ }
+ return true, nil
+ })
+
+ if err != nil {
+ klog.Errorf("cluster %s connect failed, err: %s", bm, err)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ConnectTidbFailed",
+ Message: err.Error(),
+ })
+ }
+
+ defer db.Close()
+ return bm.performBackup(backup.DeepCopy(), db)
}
-func (bm *Manager) performBackup(backup *v1alpha1.Backup) error {
+func (bm *Manager) performBackup(backup *v1alpha1.Backup, db *sql.DB) error {
started := time.Now()
err := bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
@@ -74,23 +127,101 @@ func (bm *Manager) performBackup(backup *v1alpha1.Backup) error {
return err
}
- backupFullPath, err := bm.backupData(backup)
+ oldTikvGCTime, err := bm.GetTikvGCLifeTime(db)
if err != nil {
- glog.Errorf("backup cluster %s data failed, err: %s", bm, err)
+ klog.Errorf("cluster %s get %s failed, err: %s", bm, constants.TikvGCVariable, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
- Reason: "BackupDataToRemoteFailed",
+ Reason: "GetTikvGCLifeTimeFailed",
Message: err.Error(),
})
}
- glog.Infof("backup cluster %s data to %s success", bm, backupFullPath)
+ klog.Infof("cluster %s %s is %s", bm, constants.TikvGCVariable, oldTikvGCTime)
+
+ oldTikvGCTimeDuration, err := time.ParseDuration(oldTikvGCTime)
+ if err != nil {
+ klog.Errorf("cluster %s parse old %s failed, err: %s", bm, constants.TikvGCVariable, err)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ParseOldTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+
+ var tikvGCTimeDuration time.Duration
+ var tikvGCLifeTime string
+ if backup.Spec.TikvGCLifeTime != nil {
+ tikvGCLifeTime = *backup.Spec.TikvGCLifeTime
+ tikvGCTimeDuration, err = time.ParseDuration(tikvGCLifeTime)
+ if err != nil {
+ klog.Errorf("cluster %s parse configured %s failed, err: %s", bm, constants.TikvGCVariable, err)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ParseConfiguredTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ } else {
+ tikvGCLifeTime = constants.TikvGCLifeTime
+ tikvGCTimeDuration, err = time.ParseDuration(tikvGCLifeTime)
+ if err != nil {
+ klog.Errorf("cluster %s parse default %s failed, err: %s", bm, constants.TikvGCVariable, err)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ParseDefaultTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ }
+
+ if oldTikvGCTimeDuration < tikvGCTimeDuration {
+ err = bm.SetTikvGCLifeTime(db, tikvGCLifeTime)
+ if err != nil {
+ klog.Errorf("cluster %s set tikv GC life time to %s failed, err: %s", bm, tikvGCLifeTime, err)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "SetTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ klog.Infof("set cluster %s %s to %s success", bm, constants.TikvGCVariable, tikvGCLifeTime)
+ }
+
+ backupFullPath, backupErr := bm.backupData(backup)
+ if oldTikvGCTimeDuration < tikvGCTimeDuration {
+ err = bm.SetTikvGCLifeTime(db, oldTikvGCTime)
+ if err != nil {
+ klog.Errorf("cluster %s reset tikv GC life time to %s failed, err: %s", bm, oldTikvGCTime, err)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ResetTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ klog.Infof("reset cluster %s %s to %s success", bm, constants.TikvGCVariable, oldTikvGCTime)
+ }
+ if backupErr != nil {
+ klog.Errorf("backup cluster %s data failed, err: %s", bm, backupErr)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "BackupDataToRemoteFailed",
+ Message: backupErr.Error(),
+ })
+ }
+ klog.Infof("backup cluster %s data to %s success", bm, backupFullPath)
// Note: The size get from remote may be incorrect because the blobs
// are eventually consistent.
size, err := getBackupSize(backup)
if err != nil {
- glog.Errorf("Get size for backup files in %s of cluster %s failed, err: %s", backupFullPath, bm, err)
+ klog.Errorf("Get size for backup files in %s of cluster %s failed, err: %s", backupFullPath, bm, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -98,11 +229,11 @@ func (bm *Manager) performBackup(backup *v1alpha1.Backup) error {
Message: err.Error(),
})
}
- glog.Infof("Get size %d for backup files in %s of cluster %s success", size, backupFullPath, bm)
+ klog.Infof("Get size %d for backup files in %s of cluster %s success", size, backupFullPath, bm)
commitTs, err := getCommitTs(backup)
if err != nil {
- glog.Errorf("get cluster %s commitTs failed, err: %s", bm, err)
+ klog.Errorf("get cluster %s commitTs failed, err: %s", bm, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -110,7 +241,7 @@ func (bm *Manager) performBackup(backup *v1alpha1.Backup) error {
Message: err.Error(),
})
}
- glog.Infof("get cluster %s commitTs %d success", bm, commitTs)
+ klog.Infof("get cluster %s commitTs %d success", bm, commitTs)
finish := time.Now()
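The tikv_gc_life_time handling above reads the current value, raises it for the duration of the backup when it is shorter than the configured (or default) lifetime, and writes the original value back afterwards. Expressed against the SQL that the GC life time helpers issue (the mysql.tidb table name, the 4000 port and the 72h value are assumptions taken from the constants package rather than shown in this diff), the dance is roughly:

    # sketch of what GetTikvGCLifeTime / SetTikvGCLifeTime issue over the TiDB connection
    mysql -h "$HOST" -P 4000 -u root -e \
        "SELECT variable_value FROM mysql.tidb WHERE variable_name = 'tikv_gc_life_time'"
    mysql -h "$HOST" -P 4000 -u root -e \
        "UPDATE mysql.tidb SET variable_value = '72h' WHERE variable_name = 'tikv_gc_life_time'"
    # ... run br backup, then UPDATE the original value back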
diff --git a/cmd/backup-manager/app/backup_manager.go b/cmd/backup-manager/app/backup_manager.go
index 83b9c64c31..8800891446 100644
--- a/cmd/backup-manager/app/backup_manager.go
+++ b/cmd/backup-manager/app/backup_manager.go
@@ -26,7 +26,7 @@ func Run() error {
logs.InitLogs()
defer logs.FlushLogs()
- // fix glog parse error
+ // fix klog parse error
flag.CommandLine.Parse([]string{})
pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
diff --git a/cmd/backup-manager/app/clean/clean.go b/cmd/backup-manager/app/clean/clean.go
index 136866b0a9..c727319d58 100644
--- a/cmd/backup-manager/app/clean/clean.go
+++ b/cmd/backup-manager/app/clean/clean.go
@@ -19,7 +19,7 @@ import (
"io"
"os/exec"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
@@ -53,12 +53,12 @@ func (bo *Options) cleanBRRemoteBackupData(backup *v1alpha1.Backup) error {
if err != nil {
return err
}
- glog.Infof("Prepare to delete %s for cluster %s", obj.Key, bo)
+ klog.Infof("Prepare to delete %s for cluster %s", obj.Key, bo)
err = s.Delete(context.Background(), obj.Key)
if err != nil {
return err
}
- glog.Infof("Delete %s for cluster %s successfully", obj.Key, bo)
+ klog.Infof("Delete %s for cluster %s successfully", obj.Key, bo)
}
return nil
}
@@ -70,6 +70,6 @@ func (bo *Options) cleanRemoteBackupData(bucket string) error {
return fmt.Errorf("cluster %s, execute rclone deletefile command failed, output: %s, err: %v", bo, string(output), err)
}
- glog.Infof("cluster %s backup %s was deleted successfully", bo, bucket)
+ klog.Infof("cluster %s backup %s was deleted successfully", bo, bucket)
return nil
}
diff --git a/cmd/backup-manager/app/clean/manager.go b/cmd/backup-manager/app/clean/manager.go
index 01380b5eec..e2e4061567 100644
--- a/cmd/backup-manager/app/clean/manager.go
+++ b/cmd/backup-manager/app/clean/manager.go
@@ -17,7 +17,7 @@ import (
"fmt"
corev1 "k8s.io/api/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
@@ -55,7 +55,7 @@ func (bm *Manager) ProcessCleanBackup() error {
func (bm *Manager) performCleanBackup(backup *v1alpha1.Backup) error {
if backup.Status.BackupPath == "" {
- glog.Errorf("cluster %s backup path is empty", bm)
+ klog.Errorf("cluster %s backup path is empty", bm)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -72,7 +72,7 @@ func (bm *Manager) performCleanBackup(backup *v1alpha1.Backup) error {
}
if err != nil {
- glog.Errorf("clean cluster %s backup %s failed, err: %s", bm, backup.Status.BackupPath, err)
+ klog.Errorf("clean cluster %s backup %s failed, err: %s", bm, backup.Status.BackupPath, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -81,7 +81,7 @@ func (bm *Manager) performCleanBackup(backup *v1alpha1.Backup) error {
})
}
- glog.Infof("clean cluster %s backup %s success", bm, backup.Status.BackupPath)
+ klog.Infof("clean cluster %s backup %s success", bm, backup.Status.BackupPath)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupClean,
Status: corev1.ConditionTrue,
diff --git a/cmd/backup-manager/app/cmd/backup.go b/cmd/backup-manager/app/cmd/backup.go
index d0e2fce165..5c7bc86b21 100644
--- a/cmd/backup-manager/app/cmd/backup.go
+++ b/cmd/backup-manager/app/cmd/backup.go
@@ -23,7 +23,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/cache"
- glog "k8s.io/klog"
+ "k8s.io/klog"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
@@ -41,7 +41,10 @@ func NewBackupCommand() *cobra.Command {
}
cmd.Flags().StringVar(&bo.Namespace, "namespace", "", "Backup CR's namespace")
- cmd.Flags().StringVar(&bo.BackupName, "backupName", "", "Backup CRD object name")
+ cmd.Flags().StringVar(&bo.ResourceName, "backupName", "", "Backup CRD object name")
+ cmd.Flags().StringVar(&bo.TiKVVersion, "tikvVersion", util.DefaultVersion, "TiKV version")
+ cmd.Flags().BoolVar(&bo.TLSClient, "client-tls", false, "Whether client tls is enabled")
+ cmd.Flags().BoolVar(&bo.TLSCluster, "cluster-tls", false, "Whether cluster tls is enabled")
return cmd
}
@@ -63,7 +66,7 @@ func runBackup(backupOpts backup.Options, kubecfg string) error {
// waiting for the shared informer's store has synced.
cache.WaitForCacheSync(ctx.Done(), backupInformer.Informer().HasSynced)
- glog.Infof("start to process backup %s", backupOpts.String())
+ klog.Infof("start to process backup %s", backupOpts.String())
bm := backup.NewManager(backupInformer.Lister(), statusUpdater, backupOpts)
return bm.ProcessBackup()
}
diff --git a/cmd/backup-manager/app/cmd/clean.go b/cmd/backup-manager/app/cmd/clean.go
index 35095223de..efff8cc92e 100644
--- a/cmd/backup-manager/app/cmd/clean.go
+++ b/cmd/backup-manager/app/cmd/clean.go
@@ -23,7 +23,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/cache"
- glog "k8s.io/klog"
+ "k8s.io/klog"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
@@ -64,7 +64,7 @@ func runClean(backupOpts clean.Options, kubecfg string) error {
// waiting for the shared informer's store has synced.
cache.WaitForCacheSync(ctx.Done(), backupInformer.Informer().HasSynced)
- glog.Infof("start to clean backup %s", backupOpts.String())
+ klog.Infof("start to clean backup %s", backupOpts.String())
bm := clean.NewManager(backupInformer.Lister(), statusUpdater, backupOpts)
return bm.ProcessCleanBackup()
}
diff --git a/cmd/backup-manager/app/cmd/export.go b/cmd/backup-manager/app/cmd/export.go
index 437506c8d1..f4c19f2d63 100644
--- a/cmd/backup-manager/app/cmd/export.go
+++ b/cmd/backup-manager/app/cmd/export.go
@@ -21,18 +21,17 @@ import (
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/export"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
- bkconstants "github.com/pingcap/tidb-operator/pkg/backup/constants"
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/cache"
- glog "k8s.io/klog"
+ "k8s.io/klog"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
// NewExportCommand implements the backup command
func NewExportCommand() *cobra.Command {
- bo := export.BackupOpts{}
+ bo := export.Options{}
cmd := &cobra.Command{
Use: "export",
@@ -44,18 +43,13 @@ func NewExportCommand() *cobra.Command {
}
cmd.Flags().StringVar(&bo.Namespace, "namespace", "", "Backup CR's namespace")
- cmd.Flags().StringVar(&bo.Host, "host", "", "Tidb cluster access address")
- cmd.Flags().Int32Var(&bo.Port, "port", bkconstants.DefaultTidbPort, "Port number to use for connecting tidb cluster")
+ cmd.Flags().StringVar(&bo.ResourceName, "backupName", "", "Backup CRD object name")
cmd.Flags().StringVar(&bo.Bucket, "bucket", "", "Bucket in which to store the backup data")
- cmd.Flags().StringVar(&bo.Password, bkconstants.TidbPasswordKey, "", "Password to use when connecting to tidb cluster")
- cmd.Flags().StringVar(&bo.User, "user", "", "User for login tidb cluster")
cmd.Flags().StringVar(&bo.StorageType, "storageType", "", "Backend storage type")
- cmd.Flags().StringVar(&bo.BackupName, "backupName", "", "Backup CRD object name")
- util.SetFlagsFromEnv(cmd.Flags(), bkconstants.BackupManagerEnvVarPrefix)
return cmd
}
-func runExport(backupOpts export.BackupOpts, kubecfg string) error {
+func runExport(backupOpts export.Options, kubecfg string) error {
kubeCli, cli, err := util.NewKubeAndCRCli(kubecfg)
cmdutil.CheckErr(err)
options := []informers.SharedInformerOption{
@@ -73,7 +67,7 @@ func runExport(backupOpts export.BackupOpts, kubecfg string) error {
// waiting for the shared informer's store has synced.
cache.WaitForCacheSync(ctx.Done(), backupInformer.Informer().HasSynced)
- glog.Infof("start to process backup %s", backupOpts.String())
+ klog.Infof("start to process backup %s", backupOpts.String())
bm := export.NewBackupManager(backupInformer.Lister(), statusUpdater, backupOpts)
return bm.ProcessBackup()
}
diff --git a/cmd/backup-manager/app/cmd/import.go b/cmd/backup-manager/app/cmd/import.go
index 92aef28d18..89a796128a 100644
--- a/cmd/backup-manager/app/cmd/import.go
+++ b/cmd/backup-manager/app/cmd/import.go
@@ -21,18 +21,17 @@ import (
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/import"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
- bkconstants "github.com/pingcap/tidb-operator/pkg/backup/constants"
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/cache"
- glog "k8s.io/klog"
+ "k8s.io/klog"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
// NewImportCommand implements the restore command
func NewImportCommand() *cobra.Command {
- ro := _import.RestoreOpts{}
+ ro := _import.Options{}
cmd := &cobra.Command{
Use: "import",
@@ -44,17 +43,12 @@ func NewImportCommand() *cobra.Command {
}
cmd.Flags().StringVar(&ro.Namespace, "namespace", "", "Restore CR's namespace")
- cmd.Flags().StringVar(&ro.Host, "host", "", "Tidb cluster access address")
- cmd.Flags().Int32Var(&ro.Port, "port", bkconstants.DefaultTidbPort, "Port number to use for connecting tidb cluster")
- cmd.Flags().StringVar(&ro.Password, bkconstants.TidbPasswordKey, "", "Password to use when connecting to tidb cluster")
- cmd.Flags().StringVar(&ro.User, "user", "", "User for login tidb cluster")
- cmd.Flags().StringVar(&ro.RestoreName, "restoreName", "", "Restore CRD object name")
+ cmd.Flags().StringVar(&ro.ResourceName, "restoreName", "", "Restore CRD object name")
cmd.Flags().StringVar(&ro.BackupPath, "backupPath", "", "The location of the backup")
- util.SetFlagsFromEnv(cmd.Flags(), bkconstants.BackupManagerEnvVarPrefix)
return cmd
}
-func runImport(restoreOpts _import.RestoreOpts, kubecfg string) error {
+func runImport(restoreOpts _import.Options, kubecfg string) error {
kubeCli, cli, err := util.NewKubeAndCRCli(kubecfg)
cmdutil.CheckErr(err)
options := []informers.SharedInformerOption{
@@ -72,7 +66,7 @@ func runImport(restoreOpts _import.RestoreOpts, kubecfg string) error {
// waiting for the shared informer's store has synced.
cache.WaitForCacheSync(ctx.Done(), restoreInformer.Informer().HasSynced)
- glog.Infof("start to process restore %s", restoreOpts.String())
+ klog.Infof("start to process restore %s", restoreOpts.String())
rm := _import.NewRestoreManager(restoreInformer.Lister(), statusUpdater, restoreOpts)
return rm.ProcessRestore()
}
diff --git a/cmd/backup-manager/app/cmd/restore.go b/cmd/backup-manager/app/cmd/restore.go
index 50152afee2..fa383c2e5c 100644
--- a/cmd/backup-manager/app/cmd/restore.go
+++ b/cmd/backup-manager/app/cmd/restore.go
@@ -21,12 +21,11 @@ import (
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/restore"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
- bkconstants "github.com/pingcap/tidb-operator/pkg/backup/constants"
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/spf13/cobra"
"k8s.io/client-go/tools/cache"
- glog "k8s.io/klog"
+ "k8s.io/klog"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
@@ -44,8 +43,10 @@ func NewRestoreCommand() *cobra.Command {
}
cmd.Flags().StringVar(&ro.Namespace, "namespace", "", "Restore CR's namespace")
- cmd.Flags().StringVar(&ro.RestoreName, "restoreName", "", "Restore CRD object name")
- util.SetFlagsFromEnv(cmd.Flags(), bkconstants.BackupManagerEnvVarPrefix)
+ cmd.Flags().StringVar(&ro.ResourceName, "restoreName", "", "Restore CRD object name")
+ cmd.Flags().StringVar(&ro.TiKVVersion, "tikvVersion", util.DefaultVersion, "TiKV version")
+ cmd.Flags().BoolVar(&ro.TLSClient, "client-tls", false, "Whether client tls is enabled")
+ cmd.Flags().BoolVar(&ro.TLSCluster, "cluster-tls", false, "Whether cluster tls is enabled")
return cmd
}
@@ -67,7 +68,7 @@ func runRestore(restoreOpts restore.Options, kubecfg string) error {
// waiting for the shared informer's store has synced.
cache.WaitForCacheSync(ctx.Done(), restoreInformer.Informer().HasSynced)
- glog.Infof("start to process restore %s", restoreOpts.String())
+ klog.Infof("start to process restore %s", restoreOpts.String())
rm := restore.NewManager(restoreInformer.Lister(), statusUpdater, restoreOpts)
return rm.ProcessRestore()
}
diff --git a/cmd/backup-manager/app/constants/constants.go b/cmd/backup-manager/app/constants/constants.go
index 24b5ad7d30..c471181138 100644
--- a/cmd/backup-manager/app/constants/constants.go
+++ b/cmd/backup-manager/app/constants/constants.go
@@ -56,4 +56,10 @@ const (
// MetaFile is the file name for meta data of backup with BR
MetaFile = "backupmeta"
+
+ // BR certificate storage path
+ BRCertPath = "/var/lib/br-tls"
+
+ // ServiceAccountCAPath is where is CABundle of serviceaccount locates
+ ServiceAccountCAPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
)
diff --git a/cmd/backup-manager/app/export/export.go b/cmd/backup-manager/app/export/export.go
index 194463ef62..fe9d37e40b 100644
--- a/cmd/backup-manager/app/export/export.go
+++ b/cmd/backup-manager/app/export/export.go
@@ -14,7 +14,6 @@
package export
import (
- "database/sql"
"fmt"
"io/ioutil"
"os/exec"
@@ -24,62 +23,32 @@ import (
"time"
"github.com/mholt/archiver"
- glog "k8s.io/klog"
-
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
+ "k8s.io/klog"
)
-// BackupOpts contains the input arguments to the backup command
-type BackupOpts struct {
- Namespace string
- BackupName string
+// Options contains the input arguments to the backup command
+type Options struct {
+ util.GenericOptions
Bucket string
- Host string
- Port int32
- Password string
- User string
StorageType string
}
-func (bo *BackupOpts) String() string {
- return fmt.Sprintf("%s/%s", bo.Namespace, bo.BackupName)
-}
-
-func (bo *BackupOpts) getBackupFullPath() string {
+func (bo *Options) getBackupFullPath() string {
return filepath.Join(constants.BackupRootPath, bo.getBackupRelativePath())
}
-func (bo *BackupOpts) getBackupRelativePath() string {
+func (bo *Options) getBackupRelativePath() string {
backupName := fmt.Sprintf("backup-%s", time.Now().UTC().Format(time.RFC3339))
return fmt.Sprintf("%s/%s", bo.Bucket, backupName)
}
-func (bo *BackupOpts) getDestBucketURI(remotePath string) string {
+func (bo *Options) getDestBucketURI(remotePath string) string {
return fmt.Sprintf("%s://%s", bo.StorageType, remotePath)
}
-func (bo *BackupOpts) getTikvGCLifeTime(db *sql.DB) (string, error) {
- var tikvGCTime string
- sql := fmt.Sprintf("select variable_value from %s where variable_name= ?", constants.TidbMetaTable)
- row := db.QueryRow(sql, constants.TikvGCVariable)
- err := row.Scan(&tikvGCTime)
- if err != nil {
- return tikvGCTime, fmt.Errorf("query cluster %s %s failed, sql: %s, err: %v", bo, constants.TikvGCVariable, sql, err)
- }
- return tikvGCTime, nil
-}
-
-func (bo *BackupOpts) setTikvGCLifeTime(db *sql.DB, gcTime string) error {
- sql := fmt.Sprintf("update %s set variable_value = ? where variable_name = ?", constants.TidbMetaTable)
- _, err := db.Exec(sql, gcTime, constants.TikvGCVariable)
- if err != nil {
- return fmt.Errorf("set cluster %s %s failed, sql: %s, err: %v", bo, constants.TikvGCVariable, sql, err)
- }
- return nil
-}
-
-func (bo *BackupOpts) dumpTidbClusterData() (string, error) {
+func (bo *Options) dumpTidbClusterData() (string, error) {
bfPath := bo.getBackupFullPath()
err := util.EnsureDirectoryExist(bfPath)
if err != nil {
@@ -95,7 +64,7 @@ func (bo *BackupOpts) dumpTidbClusterData() (string, error) {
"--tidb-force-priority=LOW_PRIORITY",
"--verbose=3",
"--regex",
- "^(?!(mysql|test|INFORMATION_SCHEMA|PERFORMANCE_SCHEMA))",
+ "^(?!(mysql|test|INFORMATION_SCHEMA|PERFORMANCE_SCHEMA|METRICS_SCHEMA|INSPECTION_SCHEMA))",
}
output, err := exec.Command("/mydumper", args...).CombinedOutput()
@@ -105,7 +74,7 @@ func (bo *BackupOpts) dumpTidbClusterData() (string, error) {
return bfPath, nil
}
-func (bo *BackupOpts) backupDataToRemote(source, bucketURI string) error {
+func (bo *Options) backupDataToRemote(source, bucketURI string) error {
destBucket := util.NormalizeBucketURI(bucketURI)
tmpDestBucket := fmt.Sprintf("%s.tmp", destBucket)
// TODO: We may need to use exec.CommandContext to control timeouts.
@@ -114,7 +83,7 @@ func (bo *BackupOpts) backupDataToRemote(source, bucketURI string) error {
return fmt.Errorf("cluster %s, execute rclone copyto command for upload backup data %s failed, output: %s, err: %v", bo, bucketURI, string(output), err)
}
- glog.Infof("upload cluster %s backup data to %s successfully, now move it to permanent URL %s", bo, tmpDestBucket, destBucket)
+ klog.Infof("upload cluster %s backup data to %s successfully, now move it to permanent URL %s", bo, tmpDestBucket, destBucket)
// the backup was a success
// remove .tmp extension
@@ -125,10 +94,6 @@ func (bo *BackupOpts) backupDataToRemote(source, bucketURI string) error {
return nil
}
-func (bo *BackupOpts) getDSN(db string) string {
- return fmt.Sprintf("%s:%s@(%s:%d)/%s?charset=utf8", bo.User, bo.Password, bo.Host, bo.Port, db)
-}
-
/*
getCommitTsFromMetadata get commitTs from mydumper's metadata file
diff --git a/cmd/backup-manager/app/export/manager.go b/cmd/backup-manager/app/export/manager.go
index f4316c6f5f..544dd75598 100644
--- a/cmd/backup-manager/app/export/manager.go
+++ b/cmd/backup-manager/app/export/manager.go
@@ -21,26 +21,27 @@ import (
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ bkconstants "github.com/pingcap/tidb-operator/pkg/backup/constants"
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// BackupManager mainly used to manage backup related work
type BackupManager struct {
backupLister listers.BackupLister
StatusUpdater controller.BackupConditionUpdaterInterface
- BackupOpts
+ Options
}
// NewBackupManager return a BackupManager
func NewBackupManager(
backupLister listers.BackupLister,
statusUpdater controller.BackupConditionUpdaterInterface,
- backupOpts BackupOpts) *BackupManager {
+ backupOpts Options) *BackupManager {
return &BackupManager{
backupLister,
statusUpdater,
@@ -48,11 +49,29 @@ func NewBackupManager(
}
}
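+// setOptions fills in the TiDB connection options from the Backup CR: the host comes from spec.from, the port and user fall back to the cluster defaults when unset, and the password is read from the backup-manager environment variable.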
+func (bm *BackupManager) setOptions(backup *v1alpha1.Backup) {
+ bm.Options.Host = backup.Spec.From.Host
+
+ if backup.Spec.From.Port != 0 {
+ bm.Options.Port = backup.Spec.From.Port
+ } else {
+ bm.Options.Port = bkconstants.DefaultTidbPort
+ }
+
+ if backup.Spec.From.User != "" {
+ bm.Options.User = backup.Spec.From.User
+ } else {
+ bm.Options.User = bkconstants.DefaultTidbUser
+ }
+
+ bm.Options.Password = util.GetOptionValueFromEnv(bkconstants.TidbPasswordKey, bkconstants.BackupManagerEnvVarPrefix)
+}
+
// ProcessBackup used to process the backup logic
func (bm *BackupManager) ProcessBackup() error {
- backup, err := bm.backupLister.Backups(bm.Namespace).Get(bm.BackupName)
+ backup, err := bm.backupLister.Backups(bm.Namespace).Get(bm.ResourceName)
if err != nil {
- glog.Errorf("can't find cluster %s backup %s CRD object, err: %v", bm, bm.BackupName, err)
+ klog.Errorf("can't find cluster %s backup %s CRD object, err: %v", bm, bm.ResourceName, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -61,23 +80,28 @@ func (bm *BackupManager) ProcessBackup() error {
})
}
+ bm.setOptions(backup)
+
var db *sql.DB
+ var dsn string
err = wait.PollImmediate(constants.PollInterval, constants.CheckTimeout, func() (done bool, err error) {
- db, err = util.OpenDB(bm.getDSN(constants.TidbMetaDB))
+ // TLS is not currently supported
+ dsn, err = bm.GetDSN(false)
if err != nil {
- glog.Warningf("can't open connection to tidb cluster %s, err: %v", bm, err)
- return false, nil
+ klog.Errorf("can't get dsn of tidb cluster %s, err: %s", bm, err)
+ return false, err
}
- if err := db.Ping(); err != nil {
- glog.Warningf("can't connect to tidb cluster %s, err: %s", bm, err)
+ db, err = util.OpenDB(dsn)
+ if err != nil {
+ klog.Warningf("can't connect to tidb cluster %s, err: %s", bm, err)
return false, nil
}
return true, nil
})
if err != nil {
- glog.Errorf("cluster %s connect failed, err: %s", bm, err)
+ klog.Errorf("cluster %s connect failed, err: %s", bm, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -101,9 +125,9 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
return err
}
- oldTikvGCTime, err := bm.getTikvGCLifeTime(db)
+ oldTikvGCTime, err := bm.GetTikvGCLifeTime(db)
if err != nil {
- glog.Errorf("cluster %s get %s failed, err: %s", bm, constants.TikvGCVariable, err)
+ klog.Errorf("cluster %s get %s failed, err: %s", bm, constants.TikvGCVariable, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -111,11 +135,11 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
Message: err.Error(),
})
}
- glog.Infof("cluster %s %s is %s", bm, constants.TikvGCVariable, oldTikvGCTime)
+ klog.Infof("cluster %s %s is %s", bm, constants.TikvGCVariable, oldTikvGCTime)
oldTikvGCTimeDuration, err := time.ParseDuration(oldTikvGCTime)
if err != nil {
- glog.Errorf("cluster %s parse old %s failed, err: %s", bm, constants.TikvGCVariable, err)
+ klog.Errorf("cluster %s parse old %s failed, err: %s", bm, constants.TikvGCVariable, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -123,20 +147,39 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
Message: err.Error(),
})
}
- tikvGCTimeDuration, err := time.ParseDuration(constants.TikvGCLifeTime)
- if err != nil {
- glog.Errorf("cluster %s parse default %s failed, err: %s", bm, constants.TikvGCVariable, err)
- return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
- Type: v1alpha1.BackupFailed,
- Status: corev1.ConditionTrue,
- Reason: "ParseDefaultTikvGCLifeTimeFailed",
- Message: err.Error(),
- })
+
+ var tikvGCTimeDuration time.Duration
+ var tikvGCLifeTime string
+ if backup.Spec.TikvGCLifeTime != nil {
+ tikvGCLifeTime = *backup.Spec.TikvGCLifeTime
+ tikvGCTimeDuration, err = time.ParseDuration(tikvGCLifeTime)
+ if err != nil {
+ klog.Errorf("cluster %s parse configured %s failed, err: %s", bm, constants.TikvGCVariable, err)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ParseConfiguredTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ } else {
+ tikvGCLifeTime = constants.TikvGCLifeTime
+ tikvGCTimeDuration, err = time.ParseDuration(tikvGCLifeTime)
+ if err != nil {
+ klog.Errorf("cluster %s parse default %s failed, err: %s", bm, constants.TikvGCVariable, err)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ParseDefaultTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
}
+
if oldTikvGCTimeDuration < tikvGCTimeDuration {
- err = bm.setTikvGCLifeTime(db, constants.TikvGCLifeTime)
+ err = bm.SetTikvGCLifeTime(db, constants.TikvGCLifeTime)
if err != nil {
- glog.Errorf("cluster %s set tikv GC life time to %s failed, err: %s", bm, constants.TikvGCLifeTime, err)
+ klog.Errorf("cluster %s set tikv GC life time to %s failed, err: %s", bm, constants.TikvGCLifeTime, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -144,25 +187,14 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
Message: err.Error(),
})
}
- glog.Infof("set cluster %s %s to %s success", bm, constants.TikvGCVariable, constants.TikvGCLifeTime)
- }
-
- backupFullPath, err := bm.dumpTidbClusterData()
- if err != nil {
- glog.Errorf("dump cluster %s data failed, err: %s", bm, err)
- return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
- Type: v1alpha1.BackupFailed,
- Status: corev1.ConditionTrue,
- Reason: "DumpTidbClusterFailed",
- Message: err.Error(),
- })
+ klog.Infof("set cluster %s %s to %s success", bm, constants.TikvGCVariable, constants.TikvGCLifeTime)
}
- glog.Infof("dump cluster %s data to %s success", bm, backupFullPath)
+ backupFullPath, backupErr := bm.dumpTidbClusterData()
if oldTikvGCTimeDuration < tikvGCTimeDuration {
- err = bm.setTikvGCLifeTime(db, oldTikvGCTime)
+ err = bm.SetTikvGCLifeTime(db, oldTikvGCTime)
if err != nil {
- glog.Errorf("cluster %s reset tikv GC life time to %s failed, err: %s", bm, oldTikvGCTime, err)
+ klog.Errorf("cluster %s reset tikv GC life time to %s failed, err: %s", bm, oldTikvGCTime, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -170,13 +202,24 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
Message: err.Error(),
})
}
- glog.Infof("reset cluster %s %s to %s success", bm, constants.TikvGCVariable, oldTikvGCTime)
+ klog.Infof("reset cluster %s %s to %s success", bm, constants.TikvGCVariable, oldTikvGCTime)
+ }
+ if backupErr != nil {
+ klog.Errorf("dump cluster %s data failed, err: %s", bm, backupErr)
+ return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
+ Type: v1alpha1.BackupFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "DumpTidbClusterFailed",
+ Message: backupErr.Error(),
+ })
}
+ klog.Infof("dump cluster %s data to %s success", bm, backupFullPath)
+
// TODO: Concurrent get file size and upload backup data to speed up processing time
archiveBackupPath := backupFullPath + constants.DefaultArchiveExtention
err = archiveBackupData(backupFullPath, archiveBackupPath)
if err != nil {
- glog.Errorf("archive cluster %s backup data %s failed, err: %s", bm, archiveBackupPath, err)
+ klog.Errorf("archive cluster %s backup data %s failed, err: %s", bm, archiveBackupPath, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -184,11 +227,11 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
Message: err.Error(),
})
}
- glog.Infof("archive cluster %s backup data %s success", bm, archiveBackupPath)
+ klog.Infof("archive cluster %s backup data %s success", bm, archiveBackupPath)
size, err := getBackupSize(archiveBackupPath)
if err != nil {
- glog.Errorf("get cluster %s archived backup file %s size %d failed, err: %s", bm, archiveBackupPath, size, err)
+ klog.Errorf("get cluster %s archived backup file %s size %d failed, err: %s", bm, archiveBackupPath, size, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -196,11 +239,11 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
Message: err.Error(),
})
}
- glog.Infof("get cluster %s archived backup file %s size %d success", bm, archiveBackupPath, size)
+ klog.Infof("get cluster %s archived backup file %s size %d success", bm, archiveBackupPath, size)
commitTs, err := getCommitTsFromMetadata(backupFullPath)
if err != nil {
- glog.Errorf("get cluster %s commitTs failed, err: %s", bm, err)
+ klog.Errorf("get cluster %s commitTs failed, err: %s", bm, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -208,13 +251,13 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
Message: err.Error(),
})
}
- glog.Infof("get cluster %s commitTs %s success", bm, commitTs)
+ klog.Infof("get cluster %s commitTs %s success", bm, commitTs)
remotePath := strings.TrimPrefix(archiveBackupPath, constants.BackupRootPath+"/")
bucketURI := bm.getDestBucketURI(remotePath)
err = bm.backupDataToRemote(archiveBackupPath, bucketURI)
if err != nil {
- glog.Errorf("backup cluster %s data to %s failed, err: %s", bm, bm.StorageType, err)
+ klog.Errorf("backup cluster %s data to %s failed, err: %s", bm, bm.StorageType, err)
return bm.StatusUpdater.Update(backup, &v1alpha1.BackupCondition{
Type: v1alpha1.BackupFailed,
Status: corev1.ConditionTrue,
@@ -222,7 +265,7 @@ func (bm *BackupManager) performBackup(backup *v1alpha1.Backup, db *sql.DB) erro
Message: err.Error(),
})
}
- glog.Infof("backup cluster %s data to %s success", bm, bm.StorageType)
+ klog.Infof("backup cluster %s data to %s success", bm, bm.StorageType)
finish := time.Now()
diff --git a/cmd/backup-manager/app/import/manager.go b/cmd/backup-manager/app/import/manager.go
index c5d74a1591..624d24e156 100644
--- a/cmd/backup-manager/app/import/manager.go
+++ b/cmd/backup-manager/app/import/manager.go
@@ -14,6 +14,7 @@
package _import
import (
+ "database/sql"
"fmt"
"path/filepath"
"time"
@@ -21,38 +22,57 @@ import (
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ bkconstants "github.com/pingcap/tidb-operator/pkg/backup/constants"
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
-// RestoreManager mainly used to manage backup related work
+// RestoreManager mainly used to manage restore related work
type RestoreManager struct {
restoreLister listers.RestoreLister
StatusUpdater controller.RestoreConditionUpdaterInterface
- RestoreOpts
+ Options
}
// NewRestoreManager return a RestoreManager
func NewRestoreManager(
restoreLister listers.RestoreLister,
statusUpdater controller.RestoreConditionUpdaterInterface,
- backupOpts RestoreOpts) *RestoreManager {
+ restoreOpts Options) *RestoreManager {
return &RestoreManager{
restoreLister,
statusUpdater,
- backupOpts,
+ restoreOpts,
}
}
+func (rm *RestoreManager) setOptions(restore *v1alpha1.Restore) {
+ rm.Options.Host = restore.Spec.To.Host
+
+ if restore.Spec.To.Port != 0 {
+ rm.Options.Port = restore.Spec.To.Port
+ } else {
+ rm.Options.Port = bkconstants.DefaultTidbPort
+ }
+
+ if restore.Spec.To.User != "" {
+ rm.Options.User = restore.Spec.To.User
+ } else {
+ rm.Options.User = bkconstants.DefaultTidbUser
+ }
+
+ rm.Options.Password = util.GetOptionValueFromEnv(bkconstants.TidbPasswordKey, bkconstants.BackupManagerEnvVarPrefix)
+}
+
// ProcessRestore used to process the restore logic
func (rm *RestoreManager) ProcessRestore() error {
- restore, err := rm.restoreLister.Restores(rm.Namespace).Get(rm.RestoreName)
+ restore, err := rm.restoreLister.Restores(rm.Namespace).Get(rm.ResourceName)
if err != nil {
- glog.Errorf("can't find cluster %s restore %s CRD object, err: %v", rm, rm.RestoreName, err)
+ klog.Errorf("can't find cluster %s restore %s CRD object, err: %v", rm, rm.ResourceName, err)
return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
Type: v1alpha1.RestoreFailed,
Status: corev1.ConditionTrue,
@@ -61,23 +81,28 @@ func (rm *RestoreManager) ProcessRestore() error {
})
}
+ rm.setOptions(restore)
+
+ var db *sql.DB
+ var dsn string
err = wait.PollImmediate(constants.PollInterval, constants.CheckTimeout, func() (done bool, err error) {
- db, err := util.OpenDB(rm.getDSN(constants.TidbMetaDB))
+ // TLS is not currently supported
+ dsn, err = rm.GetDSN(false)
if err != nil {
- glog.Warningf("can't open connection to tidb cluster %s, err: %v", rm, err)
- return false, nil
+ klog.Errorf("can't get dsn of tidb cluster %s, err: %s", rm, err)
+ return false, err
}
- if err := db.Ping(); err != nil {
- glog.Warningf("can't connect to tidb cluster %s, err: %s", rm, err)
+ db, err = util.OpenDB(dsn)
+ if err != nil {
+ klog.Warningf("can't connect to tidb cluster %s, err: %s", rm, err)
return false, nil
}
- db.Close()
return true, nil
})
if err != nil {
- glog.Errorf("cluster %s connect failed, err: %s", rm, err)
+ klog.Errorf("cluster %s connect failed, err: %s", rm, err)
return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
Type: v1alpha1.RestoreFailed,
Status: corev1.ConditionTrue,
@@ -86,6 +111,7 @@ func (rm *RestoreManager) ProcessRestore() error {
})
}
+ defer db.Close()
return rm.performRestore(restore.DeepCopy())
}
@@ -102,7 +128,7 @@ func (rm *RestoreManager) performRestore(restore *v1alpha1.Restore) error {
restoreDataPath := rm.getRestoreDataPath()
if err := rm.downloadBackupData(restoreDataPath); err != nil {
- glog.Errorf("download cluster %s backup %s data failed, err: %s", rm, rm.BackupPath, err)
+ klog.Errorf("download cluster %s backup %s data failed, err: %s", rm, rm.BackupPath, err)
return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
Type: v1alpha1.RestoreFailed,
Status: corev1.ConditionTrue,
@@ -110,12 +136,12 @@ func (rm *RestoreManager) performRestore(restore *v1alpha1.Restore) error {
Message: fmt.Sprintf("download backup %s data failed, err: %v", rm.BackupPath, err),
})
}
- glog.Infof("download cluster %s backup %s data success", rm, rm.BackupPath)
+ klog.Infof("download cluster %s backup %s data success", rm, rm.BackupPath)
restoreDataDir := filepath.Dir(restoreDataPath)
unarchiveDataPath, err := unarchiveBackupData(restoreDataPath, restoreDataDir)
if err != nil {
- glog.Errorf("unarchive cluster %s backup %s data failed, err: %s", rm, restoreDataPath, err)
+ klog.Errorf("unarchive cluster %s backup %s data failed, err: %s", rm, restoreDataPath, err)
return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
Type: v1alpha1.RestoreFailed,
Status: corev1.ConditionTrue,
@@ -123,11 +149,11 @@ func (rm *RestoreManager) performRestore(restore *v1alpha1.Restore) error {
Message: fmt.Sprintf("unarchive backup %s data failed, err: %v", restoreDataPath, err),
})
}
- glog.Infof("unarchive cluster %s backup %s data success", rm, restoreDataPath)
+ klog.Infof("unarchive cluster %s backup %s data success", rm, restoreDataPath)
err = rm.loadTidbClusterData(unarchiveDataPath)
if err != nil {
- glog.Errorf("restore cluster %s from backup %s failed, err: %s", rm, rm.BackupPath, err)
+ klog.Errorf("restore cluster %s from backup %s failed, err: %s", rm, rm.BackupPath, err)
return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
Type: v1alpha1.RestoreFailed,
Status: corev1.ConditionTrue,
@@ -135,7 +161,7 @@ func (rm *RestoreManager) performRestore(restore *v1alpha1.Restore) error {
Message: fmt.Sprintf("loader backup %s data failed, err: %v", restoreDataPath, err),
})
}
- glog.Infof("restore cluster %s from backup %s success", rm, rm.BackupPath)
+ klog.Infof("restore cluster %s from backup %s success", rm, rm.BackupPath)
finish := time.Now()
diff --git a/cmd/backup-manager/app/import/restore.go b/cmd/backup-manager/app/import/restore.go
index 9db1c30a6d..22c00511f5 100644
--- a/cmd/backup-manager/app/import/restore.go
+++ b/cmd/backup-manager/app/import/restore.go
@@ -24,28 +24,19 @@ import (
"github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
)
-// RestoreOpts contains the input arguments to the restore command
-type RestoreOpts struct {
- Namespace string
- RestoreName string
- Password string
- Host string
- Port int32
- User string
- BackupPath string
+// Options contains the input arguments to the restore command
+type Options struct {
+ util.GenericOptions
+ BackupPath string
}
-func (ro *RestoreOpts) String() string {
- return fmt.Sprintf("%s/%s", ro.Namespace, ro.RestoreName)
-}
-
-func (ro *RestoreOpts) getRestoreDataPath() string {
+func (ro *Options) getRestoreDataPath() string {
backupName := filepath.Base(ro.BackupPath)
bucketName := filepath.Base(filepath.Dir(ro.BackupPath))
return filepath.Join(constants.BackupRootPath, bucketName, backupName)
}
-func (ro *RestoreOpts) downloadBackupData(localPath string) error {
+func (ro *Options) downloadBackupData(localPath string) error {
if err := util.EnsureDirectoryExist(filepath.Dir(localPath)); err != nil {
return err
}
@@ -62,29 +53,28 @@ func (ro *RestoreOpts) downloadBackupData(localPath string) error {
return nil
}
-func (ro *RestoreOpts) loadTidbClusterData(restorePath string) error {
+func (ro *Options) loadTidbClusterData(restorePath string) error {
if exist := util.IsDirExist(restorePath); !exist {
return fmt.Errorf("dir %s does not exist or is not a dir", restorePath)
}
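+ // The data is imported with tidb-lightning using the tidb backend, which replays the mydumper output as SQL statements through the TiDB server.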
args := []string{
- fmt.Sprintf("-d=%s", restorePath),
- fmt.Sprintf("-h=%s", ro.Host),
- fmt.Sprintf("-P=%d", ro.Port),
- fmt.Sprintf("-u=%s", ro.User),
- fmt.Sprintf("-p=%s", ro.Password),
+ "--status-addr=0.0.0.0:8289",
+ "--backend=tidb",
+ "--server-mode=false",
+ "–-log-file=",
+ fmt.Sprintf("--tidb-user=%s", ro.User),
+ fmt.Sprintf("--tidb-password=%s", ro.Password),
+ fmt.Sprintf("--tidb-host=%s", ro.Host),
+ fmt.Sprintf("--d=%s", restorePath),
}
- output, err := exec.Command("/loader", args...).CombinedOutput()
+ output, err := exec.Command("/tidb-lightning", args...).CombinedOutput()
if err != nil {
return fmt.Errorf("cluster %s, execute loader command %v failed, output: %s, err: %v", ro, args, string(output), err)
}
return nil
}
-func (ro *RestoreOpts) getDSN(db string) string {
- return fmt.Sprintf("%s:%s@(%s:%d)/%s?charset=utf8", ro.User, ro.Password, ro.Host, ro.Port, db)
-}
-
// unarchiveBackupData unarchive backup data to dest dir
func unarchiveBackupData(backupFile, destDir string) (string, error) {
var unarchiveBackupPath string
diff --git a/cmd/backup-manager/app/restore/manager.go b/cmd/backup-manager/app/restore/manager.go
index 2154c1c5c1..9b0dc28a51 100644
--- a/cmd/backup-manager/app/restore/manager.go
+++ b/cmd/backup-manager/app/restore/manager.go
@@ -14,16 +14,20 @@
package restore
import (
+ "database/sql"
"fmt"
"time"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- glog "k8s.io/klog"
-
+ "github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
+ "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ bkconstants "github.com/pingcap/tidb-operator/pkg/backup/constants"
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/klog"
)
type Manager struct {
@@ -44,11 +48,29 @@ func NewManager(
}
}
+func (rm *Manager) setOptions(restore *v1alpha1.Restore) {
+ rm.Options.Host = restore.Spec.To.Host
+
+ if restore.Spec.To.Port != 0 {
+ rm.Options.Port = restore.Spec.To.Port
+ } else {
+ rm.Options.Port = bkconstants.DefaultTidbPort
+ }
+
+ if restore.Spec.To.User != "" {
+ rm.Options.User = restore.Spec.To.User
+ } else {
+ rm.Options.User = bkconstants.DefaultTidbUser
+ }
+
+ rm.Options.Password = util.GetOptionValueFromEnv(bkconstants.TidbPasswordKey, bkconstants.BackupManagerEnvVarPrefix)
+}
+
// ProcessRestore used to process the restore logic
func (rm *Manager) ProcessRestore() error {
- restore, err := rm.restoreLister.Restores(rm.Namespace).Get(rm.RestoreName)
+ restore, err := rm.restoreLister.Restores(rm.Namespace).Get(rm.ResourceName)
if err != nil {
- glog.Errorf("can't find cluster %s restore %s CRD object, err: %v", rm, rm.RestoreName, err)
+ klog.Errorf("can't find cluster %s restore %s CRD object, err: %v", rm, rm.ResourceName, err)
return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
Type: v1alpha1.RestoreFailed,
Status: corev1.ConditionTrue,
@@ -60,10 +82,40 @@ func (rm *Manager) ProcessRestore() error {
return fmt.Errorf("no br config in %s", rm)
}
- return rm.performRestore(restore.DeepCopy())
+ rm.setOptions(restore)
+
+ var db *sql.DB
+ var dsn string
+ err = wait.PollImmediate(constants.PollInterval, constants.CheckTimeout, func() (done bool, err error) {
+ dsn, err = rm.GetDSN(rm.TLSClient)
+ if err != nil {
+ klog.Errorf("can't get dsn of tidb cluster %s, err: %s", rm, err)
+ return false, err
+ }
+
+ db, err = util.OpenDB(dsn)
+ if err != nil {
+ klog.Warningf("can't connect to tidb cluster %s, err: %s", rm, err)
+ return false, nil
+ }
+ return true, nil
+ })
+
+ if err != nil {
+ klog.Errorf("cluster %s connect failed, err: %s", rm, err)
+ return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
+ Type: v1alpha1.RestoreFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ConnectTidbFailed",
+ Message: err.Error(),
+ })
+ }
+
+ defer db.Close()
+ return rm.performRestore(restore.DeepCopy(), db)
}
-func (rm *Manager) performRestore(restore *v1alpha1.Restore) error {
+func (rm *Manager) performRestore(restore *v1alpha1.Restore, db *sql.DB) error {
started := time.Now()
err := rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
@@ -74,16 +126,95 @@ func (rm *Manager) performRestore(restore *v1alpha1.Restore) error {
return err
}
- if err := rm.restoreData(restore); err != nil {
- glog.Errorf("restore cluster %s from %s failed, err: %s", rm, restore.Spec.Type, err)
+ oldTikvGCTime, err := rm.GetTikvGCLifeTime(db)
+ if err != nil {
+ klog.Errorf("cluster %s get %s failed, err: %s", rm, constants.TikvGCVariable, err)
return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
Type: v1alpha1.RestoreFailed,
Status: corev1.ConditionTrue,
- Reason: "RestoreDataFromRemoteFailed",
+ Reason: "GetTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ klog.Infof("cluster %s %s is %s", rm, constants.TikvGCVariable, oldTikvGCTime)
+
+ oldTikvGCTimeDuration, err := time.ParseDuration(oldTikvGCTime)
+ if err != nil {
+ klog.Errorf("cluster %s parse old %s failed, err: %s", rm, constants.TikvGCVariable, err)
+ return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
+ Type: v1alpha1.RestoreFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ParseOldTikvGCLifeTimeFailed",
Message: err.Error(),
})
}
- glog.Infof("restore cluster %s from %s succeed", rm, restore.Spec.Type)
+
+ var tikvGCTimeDuration time.Duration
+ var tikvGCLifeTime string
+ if restore.Spec.TikvGCLifeTime != nil {
+ tikvGCLifeTime = *restore.Spec.TikvGCLifeTime
+ tikvGCTimeDuration, err = time.ParseDuration(tikvGCLifeTime)
+ if err != nil {
+ klog.Errorf("cluster %s parse configured %s failed, err: %s", rm, constants.TikvGCVariable, err)
+ return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
+ Type: v1alpha1.RestoreFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ParseConfiguredTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ } else {
+ tikvGCLifeTime = constants.TikvGCLifeTime
+ tikvGCTimeDuration, err = time.ParseDuration(tikvGCLifeTime)
+ if err != nil {
+ klog.Errorf("cluster %s parse default %s failed, err: %s", rm, constants.TikvGCVariable, err)
+ return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
+ Type: v1alpha1.RestoreFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ParseDefaultTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ }
+
+ if oldTikvGCTimeDuration < tikvGCTimeDuration {
+ err = rm.SetTikvGCLifeTime(db, tikvGCLifeTime)
+ if err != nil {
+ klog.Errorf("cluster %s set tikv GC life time to %s failed, err: %s", rm, tikvGCLifeTime, err)
+ return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
+ Type: v1alpha1.RestoreFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "SetTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ klog.Infof("set cluster %s %s to %s success", rm, constants.TikvGCVariable, tikvGCLifeTime)
+ }
+
+ restoreErr := rm.restoreData(restore)
+ if oldTikvGCTimeDuration < tikvGCTimeDuration {
+ err = rm.SetTikvGCLifeTime(db, oldTikvGCTime)
+ if err != nil {
+ klog.Errorf("cluster %s reset tikv GC life time to %s failed, err: %s", rm, oldTikvGCTime, err)
+ return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
+ Type: v1alpha1.RestoreFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "ResetTikvGCLifeTimeFailed",
+ Message: err.Error(),
+ })
+ }
+ klog.Infof("reset cluster %s %s to %s success", rm, constants.TikvGCVariable, oldTikvGCTime)
+ }
+ if restoreErr != nil {
+ klog.Errorf("restore cluster %s from %s failed, err: %s", rm, restore.Spec.Type, restoreErr)
+ return rm.StatusUpdater.Update(restore, &v1alpha1.RestoreCondition{
+ Type: v1alpha1.RestoreFailed,
+ Status: corev1.ConditionTrue,
+ Reason: "RestoreDataFromRemoteFailed",
+ Message: restoreErr.Error(),
+ })
+ }
+ klog.Infof("restore cluster %s from %s succeed", rm, restore.Spec.Type)
finish := time.Now()
restore.Status.TimeStarted = metav1.Time{Time: started}
diff --git a/cmd/backup-manager/app/restore/restore.go b/cmd/backup-manager/app/restore/restore.go
index 200c013753..ff79167a49 100644
--- a/cmd/backup-manager/app/restore/restore.go
+++ b/cmd/backup-manager/app/restore/restore.go
@@ -14,29 +14,41 @@
package restore
import (
+ "bufio"
"fmt"
+ "io"
+ "io/ioutil"
"os/exec"
+ "path"
+ "strings"
- glog "k8s.io/klog"
-
- "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
+ backupUtil "github.com/pingcap/tidb-operator/cmd/backup-manager/app/util"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/util"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/klog"
)
type Options struct {
- Namespace string
- RestoreName string
-}
-
-func (ro *Options) String() string {
- return fmt.Sprintf("%s/%s", ro.Namespace, ro.RestoreName)
+ backupUtil.GenericOptions
}
func (ro *Options) restoreData(restore *v1alpha1.Restore) error {
+ clusterNamespace := restore.Spec.BR.ClusterNamespace
+ if restore.Spec.BR.ClusterNamespace == "" {
+ clusterNamespace = restore.Namespace
+ }
args, err := constructBROptions(restore)
if err != nil {
return err
}
+ args = append(args, fmt.Sprintf("--pd=%s-pd.%s:2379", restore.Spec.BR.Cluster, clusterNamespace))
+ if ro.TLSCluster {
+ args = append(args, fmt.Sprintf("--ca=%s", path.Join(util.ClusterClientTLSPath, corev1.ServiceAccountRootCAKey)))
+ args = append(args, fmt.Sprintf("--cert=%s", path.Join(util.ClusterClientTLSPath, corev1.TLSCertKey)))
+ args = append(args, fmt.Sprintf("--key=%s", path.Join(util.ClusterClientTLSPath, corev1.TLSPrivateKeyKey)))
+ }
+
var restoreType string
if restore.Spec.Type == "" {
restoreType = string(v1alpha1.BackupTypeFull)
@@ -48,17 +60,50 @@ func (ro *Options) restoreData(restore *v1alpha1.Restore) error {
restoreType,
}
fullArgs = append(fullArgs, args...)
- glog.Infof("Running br command with args: %v", fullArgs)
- output, err := exec.Command("br", fullArgs...).CombinedOutput()
+ klog.Infof("Running br command with args: %v", fullArgs)
+ bin := "br" + backupUtil.Suffix(ro.TiKVVersion)
+ cmd := exec.Command(bin, fullArgs...)
+
+ stdOut, err := cmd.StdoutPipe()
+ if err != nil {
+ return fmt.Errorf("cluster %s, create stdout pipe failed, err: %v", ro, err)
+ }
+ stdErr, err := cmd.StderrPipe()
+ if err != nil {
+ return fmt.Errorf("cluster %s, create stderr pipe failed, err: %v", ro, err)
+ }
+ err = cmd.Start()
+ if err != nil {
+ return fmt.Errorf("cluster %s, execute br command failed, args: %s, err: %v", ro, fullArgs, err)
+ }
+ var errMsg string
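+ // Stream BR's stdout line by line, logging every line and collecting any "[ERROR]" lines for the error message reported on failure.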
+ reader := bufio.NewReader(stdOut)
+ for {
+ line, err := reader.ReadString('\n')
+ if strings.Contains(line, "[ERROR]") {
+ errMsg += line
+ }
+ klog.Infof(strings.Replace(line, "\n", "", -1))
+ if err != nil || io.EOF == err {
+ break
+ }
+ }
+ tmpErr, _ := ioutil.ReadAll(stdErr)
+ if len(tmpErr) > 0 {
+ klog.Infof(string(tmpErr))
+ errMsg += string(tmpErr)
+ }
+
+ err = cmd.Wait()
if err != nil {
- return fmt.Errorf("cluster %s, execute br command %v failed, output: %s, err: %v", ro, fullArgs, string(output), err)
+ return fmt.Errorf("cluster %s, wait pipe message failed, errMsg %s, err: %v", ro, errMsg, err)
}
- glog.Infof("Restore data for cluster %s successfully, output: %s", ro, string(output))
+ klog.Infof("Restore data for cluster %s successfully", ro)
return nil
}
func constructBROptions(restore *v1alpha1.Restore) ([]string, error) {
- args, err := util.ConstructBRGlobalOptionsForRestore(restore)
+ args, err := backupUtil.ConstructBRGlobalOptionsForRestore(restore)
if err != nil {
return nil, err
}
diff --git a/cmd/backup-manager/app/util/generic.go b/cmd/backup-manager/app/util/generic.go
new file mode 100644
index 0000000000..54f49d45bd
--- /dev/null
+++ b/cmd/backup-manager/app/util/generic.go
@@ -0,0 +1,95 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "database/sql"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "path"
+
+ "github.com/go-sql-driver/mysql"
+ "github.com/pingcap/tidb-operator/cmd/backup-manager/app/constants"
+ "github.com/pingcap/tidb-operator/pkg/util"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// GenericOptions contains the generic input arguments to the backup/restore command
+type GenericOptions struct {
+ Namespace string
+ // ResourceName can be the name of a backup or restore resource
+ ResourceName string
+ TLSClient bool
+ TLSCluster bool
+ Host string
+ Port int32
+ Password string
+ User string
+ TiKVVersion string
+}
+
+func (bo *GenericOptions) String() string {
+ return fmt.Sprintf("%s/%s", bo.Namespace, bo.ResourceName)
+}
+
+func (bo *GenericOptions) GetDSN(enabledTLSClient bool) (string, error) {
+ if !enabledTLSClient {
+ return fmt.Sprintf("%s:%s@(%s:%d)/%s?charset=utf8", bo.User, bo.Password, bo.Host, bo.Port, constants.TidbMetaDB), nil
+ }
+ rootCertPool := x509.NewCertPool()
+ pem, err := ioutil.ReadFile(path.Join(util.TiDBClientTLSPath, corev1.ServiceAccountRootCAKey))
+ if err != nil {
+ return "", err
+ }
+ if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+ return "", errors.New("Failed to append PEM")
+ }
+ clientCert := make([]tls.Certificate, 0, 1)
+ certs, err := tls.LoadX509KeyPair(
+ path.Join(util.TiDBClientTLSPath, corev1.TLSCertKey),
+ path.Join(util.TiDBClientTLSPath, corev1.TLSPrivateKeyKey))
+ if err != nil {
+ return "", err
+ }
+ clientCert = append(clientCert, certs)
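+ // Register the CA and client certificate with the MySQL driver under the name "customer" so the DSN below can select it via the tls parameter.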
+ mysql.RegisterTLSConfig("customer", &tls.Config{
+ RootCAs: rootCertPool,
+ Certificates: clientCert,
+ ServerName: bo.Host,
+ })
+ return fmt.Sprintf("%s:%s@(%s:%d)/%s?tls=customer&charset=utf8", bo.User, bo.Password, bo.Host, bo.Port, constants.TidbMetaDB), nil
+}
+
+func (bo *GenericOptions) GetTikvGCLifeTime(db *sql.DB) (string, error) {
+ var tikvGCTime string
+ sql := fmt.Sprintf("select variable_value from %s where variable_name= ?", constants.TidbMetaTable)
+ row := db.QueryRow(sql, constants.TikvGCVariable)
+ err := row.Scan(&tikvGCTime)
+ if err != nil {
+ return tikvGCTime, fmt.Errorf("query cluster %s %s failed, sql: %s, err: %v", bo, constants.TikvGCVariable, sql, err)
+ }
+ return tikvGCTime, nil
+}
+
+func (bo *GenericOptions) SetTikvGCLifeTime(db *sql.DB, gcTime string) error {
+ sql := fmt.Sprintf("update %s set variable_value = ? where variable_name = ?", constants.TidbMetaTable)
+ _, err := db.Exec(sql, gcTime, constants.TikvGCVariable)
+ if err != nil {
+ return fmt.Errorf("set cluster %s %s failed, sql: %s, err: %v", bo, constants.TikvGCVariable, sql, err)
+ }
+ return nil
+}
diff --git a/cmd/backup-manager/app/util/k8s.go b/cmd/backup-manager/app/util/k8s.go
index a0de91bf15..a10b5c1fed 100644
--- a/cmd/backup-manager/app/util/k8s.go
+++ b/cmd/backup-manager/app/util/k8s.go
@@ -22,13 +22,13 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// NewEventRecorder returns an event recorder for the specified source
func NewEventRecorder(kubeCli kubernetes.Interface, source string) record.EventRecorder {
eventBroadcaster := record.NewBroadcaster()
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: source})
diff --git a/cmd/backup-manager/app/util/util.go b/cmd/backup-manager/app/util/util.go
index e7d436c14c..89cffee275 100644
--- a/cmd/backup-manager/app/util/util.go
+++ b/cmd/backup-manager/app/util/util.go
@@ -19,14 +19,22 @@ import (
"os"
"strings"
+ "github.com/Masterminds/semver"
"github.com/spf13/pflag"
+ "k8s.io/klog"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)
var (
- cmdHelpMsg string
+ cmdHelpMsg string
+ supportedVersions = map[string]struct{}{
+ "3.1": {},
+ "4.0": {},
+ }
+ // DefaultVersion is the default TiKV and BR version
+ DefaultVersion = "4.0"
)
func validCmdFlagFunc(flag *pflag.Flag) {
@@ -98,17 +106,10 @@ func NormalizeBucketURI(bucket string) string {
return strings.Replace(bucket, "://", ":", 1)
}
-// SetFlagsFromEnv set the environment variable. Will override default values, but be overridden by command line parameters.
-func SetFlagsFromEnv(flags *pflag.FlagSet, prefix string) error {
- flags.VisitAll(func(f *pflag.Flag) {
- envVar := prefix + "_" + strings.Replace(strings.ToUpper(f.Name), "-", "_", -1)
- value := os.Getenv(envVar)
- if value != "" {
- flags.Set(f.Name, value)
- }
- })
-
- return nil
+// GetOptionValueFromEnv get option's value from environment variable. If unset, return empty string.
+func GetOptionValueFromEnv(option, envPrefix string) string {
+ envVar := envPrefix + "_" + strings.Replace(strings.ToUpper(option), "-", "_", -1)
+ return os.Getenv(envVar)
}
// ConstructBRGlobalOptionsForBackup constructs BR global options for backup and also return the remote path.
@@ -119,7 +120,7 @@ func ConstructBRGlobalOptionsForBackup(backup *v1alpha1.Backup) ([]string, strin
return nil, "", fmt.Errorf("no config for br in backup %s/%s", backup.Namespace, backup.Name)
}
args = append(args, constructBRGlobalOptions(config)...)
- storageArgs, path, err := getRemoteStorage(backup.Spec.StorageProvider)
+ storageArgs, remotePath, err := getRemoteStorage(backup.Spec.StorageProvider)
if err != nil {
return nil, "", err
}
@@ -130,7 +131,7 @@ func ConstructBRGlobalOptionsForBackup(backup *v1alpha1.Backup) ([]string, strin
if backup.Spec.Type == v1alpha1.BackupTypeTable && config.Table != "" {
args = append(args, fmt.Sprintf("--table=%s", config.Table))
}
- return args, path, nil
+ return args, remotePath, nil
}
// ConstructBRGlobalOptionsForRestore constructs BR global options for restore.
@@ -158,16 +159,6 @@ func ConstructBRGlobalOptionsForRestore(restore *v1alpha1.Restore) ([]string, er
// constructBRGlobalOptions constructs BR basic global options.
func constructBRGlobalOptions(config *v1alpha1.BRConfig) []string {
var args []string
- args = append(args, fmt.Sprintf("--pd=%s", config.PDAddress))
- if config.CA != "" {
- args = append(args, fmt.Sprintf("--ca=%s", config.CA))
- }
- if config.Cert != "" {
- args = append(args, fmt.Sprintf("--cert=%s", config.Cert))
- }
- if config.Key != "" {
- args = append(args, fmt.Sprintf("--key=%s", config.Key))
- }
if config.LogLevel != "" {
args = append(args, fmt.Sprintf("--log-level=%s", config.LogLevel))
}
@@ -179,3 +170,20 @@ func constructBRGlobalOptions(config *v1alpha1.BRConfig) []string {
}
return args
}
+
+// Suffix parses the major and minor version from the string and returns the corresponding suffix
+func Suffix(version string) string {
+ numS := strings.Split(DefaultVersion, ".")
+ defaultSuffix := numS[0] + numS[1]
+
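+ // Fall back to the default version's suffix when the given version cannot be parsed or its major.minor is not in supportedVersions.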
+ v, err := semver.NewVersion(version)
+ if err != nil {
+ klog.Errorf("Parse version %s failure, error: %v", version, err)
+ return defaultSuffix
+ }
+ parsed := fmt.Sprintf("%d.%d", v.Major(), v.Minor())
+ if _, ok := supportedVersions[parsed]; ok {
+ return fmt.Sprintf("%d%d", v.Major(), v.Minor())
+ }
+ return defaultSuffix
+}
diff --git a/cmd/controller-manager/main.go b/cmd/controller-manager/main.go
index 7da64480c3..a5ae55c7a3 100644
--- a/cmd/controller-manager/main.go
+++ b/cmd/controller-manager/main.go
@@ -29,6 +29,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/controller/autoscaler"
"github.com/pingcap/tidb-operator/pkg/controller/backup"
"github.com/pingcap/tidb-operator/pkg/controller/backupschedule"
+ "github.com/pingcap/tidb-operator/pkg/controller/periodicity"
"github.com/pingcap/tidb-operator/pkg/controller/restore"
"github.com/pingcap/tidb-operator/pkg/controller/tidbcluster"
"github.com/pingcap/tidb-operator/pkg/controller/tidbinitializer"
@@ -46,21 +47,22 @@ import (
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/logs"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
- printVersion bool
- workers int
- autoFailover bool
- pdFailoverPeriod time.Duration
- tikvFailoverPeriod time.Duration
- tidbFailoverPeriod time.Duration
- leaseDuration = 15 * time.Second
- renewDuration = 5 * time.Second
- retryPeriod = 3 * time.Second
- waitDuration = 5 * time.Second
+ printVersion bool
+ workers int
+ autoFailover bool
+ pdFailoverPeriod time.Duration
+ tikvFailoverPeriod time.Duration
+ tidbFailoverPeriod time.Duration
+ tiflashFailoverPeriod time.Duration
+ leaseDuration = 15 * time.Second
+ renewDuration = 5 * time.Second
+ retryPeriod = 3 * time.Second
+ waitDuration = 5 * time.Second
)
func init() {
@@ -71,6 +73,7 @@ func init() {
flag.BoolVar(&autoFailover, "auto-failover", true, "Auto failover")
flag.DurationVar(&pdFailoverPeriod, "pd-failover-period", time.Duration(5*time.Minute), "PD failover period default(5m)")
flag.DurationVar(&tikvFailoverPeriod, "tikv-failover-period", time.Duration(5*time.Minute), "TiKV failover period default(5m)")
+ flag.DurationVar(&tiflashFailoverPeriod, "tiflash-failover-period", time.Duration(5*time.Minute), "TiFlash failover period default(5m)")
flag.DurationVar(&tidbFailoverPeriod, "tidb-failover-period", time.Duration(5*time.Minute), "TiDB failover period")
flag.DurationVar(&controller.ResyncDuration, "resync-duration", time.Duration(30*time.Second), "Resync time of informer")
flag.BoolVar(&controller.TestMode, "test-mode", false, "whether tidb-operator run in test mode")
@@ -93,38 +96,42 @@ func main() {
logs.InitLogs()
defer logs.FlushLogs()
+ flag.CommandLine.VisitAll(func(flag *flag.Flag) {
+ klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+ })
+
hostName, err := os.Hostname()
if err != nil {
- glog.Fatalf("failed to get hostname: %v", err)
+ klog.Fatalf("failed to get hostname: %v", err)
}
ns := os.Getenv("NAMESPACE")
if ns == "" {
- glog.Fatal("NAMESPACE environment variable not set")
+ klog.Fatal("NAMESPACE environment variable not set")
}
cfg, err := rest.InClusterConfig()
if err != nil {
- glog.Fatalf("failed to get config: %v", err)
+ klog.Fatalf("failed to get config: %v", err)
}
cli, err := versioned.NewForConfig(cfg)
if err != nil {
- glog.Fatalf("failed to create Clientset: %v", err)
+ klog.Fatalf("failed to create Clientset: %v", err)
}
var kubeCli kubernetes.Interface
kubeCli, err = kubernetes.NewForConfig(cfg)
if err != nil {
- glog.Fatalf("failed to get kubernetes Clientset: %v", err)
+ klog.Fatalf("failed to get kubernetes Clientset: %v", err)
}
asCli, err := asclientset.NewForConfig(cfg)
if err != nil {
- glog.Fatalf("failed to get advanced-statefulset Clientset: %v", err)
+ klog.Fatalf("failed to get advanced-statefulset Clientset: %v", err)
}
// TODO: optimize the read of genericCli with the shared cache
genericCli, err := client.New(cfg, client.Options{Scheme: scheme.Scheme})
if err != nil {
- glog.Fatalf("failed to get the generic kube-apiserver client: %v", err)
+ klog.Fatalf("failed to get the generic kube-apiserver client: %v", err)
}
// note that kubeCli here must not be the hijacked one
@@ -143,20 +150,14 @@ func main() {
var informerFactory informers.SharedInformerFactory
var kubeInformerFactory kubeinformers.SharedInformerFactory
- if controller.ClusterScoped {
- informerFactory = informers.NewSharedInformerFactory(cli, controller.ResyncDuration)
- kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeCli, controller.ResyncDuration)
- } else {
- options := []informers.SharedInformerOption{
- informers.WithNamespace(ns),
- }
- informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
-
- kubeoptions := []kubeinformers.SharedInformerOption{
- kubeinformers.WithNamespace(ns),
- }
- kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
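+ // When not cluster-scoped, restrict both informer factories to the operator's own namespace.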
+ var options []informers.SharedInformerOption
+ var kubeoptions []kubeinformers.SharedInformerOption
+ if !controller.ClusterScoped {
+ options = append(options, informers.WithNamespace(ns))
+ kubeoptions = append(kubeoptions, kubeinformers.WithNamespace(ns))
}
+ informerFactory = informers.NewSharedInformerFactoryWithOptions(cli, controller.ResyncDuration, options...)
+ kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeCli, controller.ResyncDuration, kubeoptions...)
rl := resourcelock.EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
@@ -177,18 +178,24 @@ func main() {
// Upgrade before running any controller logic. If it fails, we wait
// for process supervisor to restart it again.
if err := operatorUpgrader.Upgrade(); err != nil {
- glog.Fatalf("failed to upgrade: %v", err)
+ klog.Fatalf("failed to upgrade: %v", err)
}
- tcController := tidbcluster.NewController(kubeCli, cli, genericCli, informerFactory, kubeInformerFactory, autoFailover, pdFailoverPeriod, tikvFailoverPeriod, tidbFailoverPeriod)
+ tcController := tidbcluster.NewController(kubeCli, cli, genericCli, informerFactory, kubeInformerFactory, autoFailover, pdFailoverPeriod, tikvFailoverPeriod, tidbFailoverPeriod, tiflashFailoverPeriod)
backupController := backup.NewController(kubeCli, cli, informerFactory, kubeInformerFactory)
restoreController := restore.NewController(kubeCli, cli, informerFactory, kubeInformerFactory)
bsController := backupschedule.NewController(kubeCli, cli, informerFactory, kubeInformerFactory)
tidbInitController := tidbinitializer.NewController(kubeCli, cli, genericCli, informerFactory, kubeInformerFactory)
tidbMonitorController := tidbmonitor.NewController(kubeCli, genericCli, informerFactory, kubeInformerFactory)
+
+ var periodicityController *periodicity.Controller
+ if controller.PodWebhookEnabled {
+ periodicityController = periodicity.NewController(kubeCli, informerFactory, kubeInformerFactory)
+ }
+
var autoScalerController *autoscaler.Controller
if features.DefaultFeatureGate.Enabled(features.AutoScaling) {
- autoScalerController = autoscaler.NewController(kubeCli, genericCli, informerFactory, kubeInformerFactory)
+ autoScalerController = autoscaler.NewController(kubeCli, cli, informerFactory, kubeInformerFactory)
}
// Start informer factories after all controller are initialized.
informerFactory.Start(ctx.Done())
@@ -197,28 +204,31 @@ func main() {
// Wait for all started informers' cache were synced.
for v, synced := range informerFactory.WaitForCacheSync(wait.NeverStop) {
if !synced {
- glog.Fatalf("error syncing informer for %v", v)
+ klog.Fatalf("error syncing informer for %v", v)
}
}
for v, synced := range kubeInformerFactory.WaitForCacheSync(wait.NeverStop) {
if !synced {
- glog.Fatalf("error syncing informer for %v", v)
+ klog.Fatalf("error syncing informer for %v", v)
}
}
- glog.Infof("cache of informer factories sync successfully")
+ klog.Infof("cache of informer factories sync successfully")
go wait.Forever(func() { backupController.Run(workers, ctx.Done()) }, waitDuration)
go wait.Forever(func() { restoreController.Run(workers, ctx.Done()) }, waitDuration)
go wait.Forever(func() { bsController.Run(workers, ctx.Done()) }, waitDuration)
go wait.Forever(func() { tidbInitController.Run(workers, ctx.Done()) }, waitDuration)
go wait.Forever(func() { tidbMonitorController.Run(workers, ctx.Done()) }, waitDuration)
+ if controller.PodWebhookEnabled {
+ go wait.Forever(func() { periodicityController.Run(ctx.Done()) }, waitDuration)
+ }
if features.DefaultFeatureGate.Enabled(features.AutoScaling) {
go wait.Forever(func() { autoScalerController.Run(workers, ctx.Done()) }, waitDuration)
}
wait.Forever(func() { tcController.Run(workers, ctx.Done()) }, waitDuration)
}
onStopped := func() {
- glog.Fatalf("leader election lost")
+ klog.Fatalf("leader election lost")
}
// leader election for multiple tidb-controller-manager instances
@@ -235,5 +245,5 @@ func main() {
})
}, waitDuration)
- glog.Fatal(http.ListenAndServe(":6060", nil))
+ klog.Fatal(http.ListenAndServe(":6060", nil))
}
diff --git a/cmd/discovery/main.go b/cmd/discovery/main.go
index 8e890ee428..9aea6001d0 100644
--- a/cmd/discovery/main.go
+++ b/cmd/discovery/main.go
@@ -27,7 +27,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/component-base/logs"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
var (
@@ -52,21 +52,25 @@ func main() {
logs.InitLogs()
defer logs.FlushLogs()
+ flag.CommandLine.VisitAll(func(flag *flag.Flag) {
+ klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+ })
+
cfg, err := rest.InClusterConfig()
if err != nil {
- glog.Fatalf("failed to get config: %v", err)
+ klog.Fatalf("failed to get config: %v", err)
}
cli, err := versioned.NewForConfig(cfg)
if err != nil {
- glog.Fatalf("failed to create Clientset: %v", err)
+ klog.Fatalf("failed to create Clientset: %v", err)
}
kubeCli, err := kubernetes.NewForConfig(cfg)
if err != nil {
- glog.Fatalf("failed to get kubernetes Clientset: %v", err)
+ klog.Fatalf("failed to get kubernetes Clientset: %v", err)
}
go wait.Forever(func() {
server.StartServer(cli, kubeCli, port)
}, 5*time.Second)
- glog.Fatal(http.ListenAndServe(":6060", nil))
+ klog.Fatal(http.ListenAndServe(":6060", nil))
}
diff --git a/cmd/scheduler/main.go b/cmd/scheduler/main.go
index af310d45b3..f7e4e7b693 100644
--- a/cmd/scheduler/main.go
+++ b/cmd/scheduler/main.go
@@ -28,7 +28,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/component-base/logs"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
var (
@@ -54,21 +54,25 @@ func main() {
logs.InitLogs()
defer logs.FlushLogs()
+ flag.CommandLine.VisitAll(func(flag *flag.Flag) {
+ klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+ })
+
cfg, err := rest.InClusterConfig()
if err != nil {
- glog.Fatalf("failed to get config: %v", err)
+ klog.Fatalf("failed to get config: %v", err)
}
kubeCli, err := kubernetes.NewForConfig(cfg)
if err != nil {
- glog.Fatalf("failed to get kubernetes Clientset: %v", err)
+ klog.Fatalf("failed to get kubernetes Clientset: %v", err)
}
cli, err := versioned.NewForConfig(cfg)
if err != nil {
- glog.Fatalf("failed to create Clientset: %v", err)
+ klog.Fatalf("failed to create Clientset: %v", err)
}
go wait.Forever(func() {
server.StartServer(kubeCli, cli, port)
}, 5*time.Second)
- glog.Fatal(http.ListenAndServe(":6060", nil))
+ klog.Fatal(http.ListenAndServe(":6060", nil))
}
diff --git a/cmd/to-crdgen/main.go b/cmd/to-crdgen/main.go
index 7dd0532a13..1a9f5386c7 100644
--- a/cmd/to-crdgen/main.go
+++ b/cmd/to-crdgen/main.go
@@ -16,9 +16,10 @@ package main
import (
"flag"
"fmt"
+ "os"
+
"github.com/pingcap/tidb-operator/pkg/to-crdgen/cmd"
"github.com/spf13/pflag"
- "os"
)
func main() {
diff --git a/deploy/aliyun/main.tf b/deploy/aliyun/main.tf
index 0c7be43e85..ef520f6b05 100644
--- a/deploy/aliyun/main.tf
+++ b/deploy/aliyun/main.tf
@@ -71,17 +71,17 @@ module "tidb-cluster" {
helm = helm.default
}
- cluster_name = "my-cluster"
+ cluster_name = var.tidb_cluster_name
ack = module.tidb-operator
- tidb_version = var.tidb_version
- tidb_cluster_chart_version = var.tidb_cluster_chart_version
- pd_instance_type = var.pd_instance_type
- pd_count = var.pd_count
- tikv_instance_type = var.tikv_instance_type
- tikv_count = var.tikv_count
- tidb_instance_type = var.tidb_instance_type
- tidb_count = var.tidb_count
- monitor_instance_type = var.monitor_instance_type
- override_values = file("my-cluster.yaml")
+ tidb_version = var.tidb_version
+ tidb_cluster_chart_version = var.tidb_cluster_chart_version
+ pd_instance_type = var.pd_instance_type
+ pd_count = var.pd_count
+ tikv_instance_type = var.tikv_instance_type
+ tikv_count = var.tikv_count
+ tidb_instance_type = var.tidb_instance_type
+ tidb_count = var.tidb_count
+ monitor_instance_type = var.monitor_instance_type
+ create_tidb_cluster_release = var.create_tidb_cluster_release
}
diff --git a/deploy/aliyun/manifests/db-monitor.yaml.example b/deploy/aliyun/manifests/db-monitor.yaml.example
new file mode 100644
index 0000000000..243e935e8f
--- /dev/null
+++ b/deploy/aliyun/manifests/db-monitor.yaml.example
@@ -0,0 +1,86 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbMonitor
+metadata:
+ name: TIDB_CLUSTER_NAME
+spec:
+ alertmanagerURL: ""
+ annotations: {}
+ clusters:
+ - name: TIDB_CLUSTER_NAME
+ grafana:
+ baseImage: grafana/grafana
+ envs:
+ # Configure Grafana using environment variables except GF_PATHS_DATA, GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD
+ # Ref https://grafana.com/docs/installation/configuration/#using-environment-variables
+ GF_AUTH_ANONYMOUS_ENABLED: "true"
+ GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org."
+ GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer"
+ # if grafana is running behind a reverse proxy with subpath http://foo.bar/grafana
+ # GF_SERVER_DOMAIN: foo.bar
+ # GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/"
+ imagePullPolicy: IfNotPresent
+ logLevel: info
+ password: admin
+ resources: {}
+ # limits:
+ # cpu: 8000m
+ # memory: 8Gi
+ # requests:
+ # cpu: 4000m
+ # memory: 4Gi
+ service:
+ portName: http-grafana
+ type: LoadBalancer
+ annotations:
+ service.beta.kubernetes.io/alicloud-loadbalancer-address-type: internet
+ username: admin
+ version: 6.0.1
+ imagePullPolicy: IfNotPresent
+ initializer:
+ baseImage: pingcap/tidb-monitor-initializer
+ imagePullPolicy: IfNotPresent
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 50m
+ # memory: 64Mi
+ version: v3.0.12
+ kubePrometheusURL: ""
+ nodeSelector: {}
+ persistent: true
+ prometheus:
+ baseImage: prom/prometheus
+ imagePullPolicy: IfNotPresent
+ logLevel: info
+ reserveDays: 12
+ resources: {}
+ # limits:
+ # cpu: 8000m
+ # memory: 8Gi
+ # requests:
+ # cpu: 4000m
+ # memory: 4Gi
+ service:
+ portName: http-prometheus
+ type: NodePort
+ version: v2.11.1
+ reloader:
+ baseImage: pingcap/tidb-monitor-reloader
+ imagePullPolicy: IfNotPresent
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 50m
+ # memory: 64Mi
+ service:
+ portName: tcp-reloader
+ type: NodePort
+ version: v1.0.1
+ storage: 100Gi
+ storageClassName: alicloud-disk-available
+ tolerations: []
+
diff --git a/deploy/aliyun/manifests/db.yaml.example b/deploy/aliyun/manifests/db.yaml.example
new file mode 100644
index 0000000000..f5b3a37448
--- /dev/null
+++ b/deploy/aliyun/manifests/db.yaml.example
@@ -0,0 +1,110 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+ name: TIDB_CLUSTER_NAME
+spec:
+ configUpdateStrategy: RollingUpdate
+ enableTLSCluster: false
+ helper:
+ image: busybox:1.31.1
+ hostNetwork: false
+ imagePullPolicy: IfNotPresent
+ pd:
+ affinity: {}
+ baseImage: pingcap/pd
+ config:
+ log:
+ level: info
+ nodeSelector:
+ dedicated: TIDB_CLUSTER_NAME-pd
+ podSecurityContext: {}
+ replicas: 3
+ requests:
+ cpu: "1"
+ memory: 400Mi
+ storage: 20Gi
+ storageClassName: alicloud-disk
+ tolerations:
+ - effect: NoSchedule
+ key: dedicated
+ operator: Equal
+ value: TIDB_CLUSTER_NAME-pd
+ pvReclaimPolicy: Retain
+ schedulerName: tidb-scheduler
+ tidb:
+ affinity: {}
+ annotations:
+ tidb.pingcap.com/sysctl-init: "true"
+ baseImage: pingcap/tidb
+ config:
+ log:
+ level: info
+ performance:
+ max-procs: 0
+ tcp-keep-alive: true
+ enableTLSClient: false
+ maxFailoverCount: 3
+ nodeSelector:
+ dedicated: TIDB_CLUSTER_NAME-tidb
+ podSecurityContext:
+ sysctls:
+ - name: net.ipv4.tcp_keepalive_time
+ value: "300"
+ - name: net.ipv4.tcp_keepalive_intvl
+ value: "75"
+ - name: net.core.somaxconn
+ value: "32768"
+ replicas: 2
+ requests:
+ cpu: "1"
+ memory: 400Mi
+ separateSlowLog: true
+ service:
+ annotations:
+ service.beta.kubernetes.io/alicloud-loadbalancer-address-type: intranet
+ service.beta.kubernetes.io/alicloud-loadbalancer-slb-network-type: vpc
+ exposeStatus: true
+ externalTrafficPolicy: Local
+ type: LoadBalancer
+ slowLogTailer:
+ limits:
+ cpu: 100m
+ memory: 50Mi
+ requests:
+ cpu: 20m
+ memory: 5Mi
+ tolerations:
+ - effect: NoSchedule
+ key: dedicated
+ operator: Equal
+ value: TIDB_CLUSTER_NAME-tidb
+ tikv:
+ affinity: {}
+ annotations:
+ tidb.pingcap.com/sysctl-init: "true"
+ baseImage: pingcap/tikv
+ config:
+ log-level: info
+ hostNetwork: false
+ maxFailoverCount: 3
+ nodeSelector:
+ dedicated: TIDB_CLUSTER_NAME-tikv
+ podSecurityContext:
+ sysctls:
+ - name: net.core.somaxconn
+ value: "32768"
+ privileged: false
+ replicas: 3
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ storage: 100Gi
+ storageClassName: local-volume
+ tolerations:
+ - effect: NoSchedule
+ key: dedicated
+ operator: Equal
+ value: TIDB_CLUSTER_NAME-tikv
+ timezone: UTC
+ version: v3.0.12
+
diff --git a/deploy/aliyun/variables.tf b/deploy/aliyun/variables.tf
index c783235497..843f163fbf 100644
--- a/deploy/aliyun/variables.tf
+++ b/deploy/aliyun/variables.tf
@@ -10,7 +10,7 @@ variable "bastion_cpu_core_count" {
variable "operator_version" {
type = string
- default = "v1.0.6"
+ default = "v1.1.0"
}
variable "operator_helm_values" {
@@ -112,3 +112,13 @@ variable "vpc_cidr" {
description = "VPC cidr_block, options: [192.168.0.0.0/16, 172.16.0.0/16, 10.0.0.0/8], cannot collidate with kubernetes service cidr and pod cidr. Cannot change once the vpc created."
default = "192.168.0.0/16"
}
+
+variable "create_tidb_cluster_release" {
+ description = "whether creating tidb-cluster helm release"
+ default = false
+}
+
+variable "tidb_cluster_name" {
+ description = "The TiDB cluster name"
+ default = "my-cluster"
+}
diff --git a/deploy/aws/clusters.tf b/deploy/aws/clusters.tf
index ac3d9ff777..a8bf9691bd 100644
--- a/deploy/aws/clusters.tf
+++ b/deploy/aws/clusters.tf
@@ -17,25 +17,24 @@ provider "helm" {
}
# TiDB cluster declaration example
-#module "example-cluster" {
-# source = "./tidb-cluster"
-# eks_info = local.default_eks
-# subnets = local.default_subnets
-#
-# # NOTE: cluster_name cannot be changed after creation
-# cluster_name = "demo-cluster"
-# cluster_version = "v3.0.8"
-# ssh_key_name = module.key-pair.key_name
-# pd_count = 1
-# pd_instance_type = "t2.xlarge"
-# tikv_count = 1
-# tikv_instance_type = "t2.xlarge"
-# tidb_count = 1
-# tidb_instance_type = "t2.xlarge"
-# monitor_instance_type = "t2.xlarge"
-# # yaml file that passed to helm to customize the release
-# override_values = file("values/example.yaml")
-#}
+# module example-cluster {
+# source = "../modules/aws/tidb-cluster"
+
+# eks = local.eks
+# subnets = local.subnets
+# region = var.region
+# cluster_name = "example"
+
+# ssh_key_name = module.key-pair.key_name
+# pd_count = 1
+# pd_instance_type = "c5.large"
+# tikv_count = 1
+# tikv_instance_type = "c5d.large"
+# tidb_count = 1
+# tidb_instance_type = "c4.large"
+# monitor_instance_type = "c5.large"
+# create_tidb_cluster_release = false
+# }
module "default-cluster" {
providers = {
@@ -46,15 +45,15 @@ module "default-cluster" {
subnets = local.subnets
region = var.region
- cluster_name = var.default_cluster_name
- cluster_version = var.default_cluster_version
- ssh_key_name = module.key-pair.key_name
- pd_count = var.default_cluster_pd_count
- pd_instance_type = var.default_cluster_pd_instance_type
- tikv_count = var.default_cluster_tikv_count
- tikv_instance_type = var.default_cluster_tikv_instance_type
- tidb_count = var.default_cluster_tidb_count
- tidb_instance_type = var.default_cluster_tidb_instance_type
- monitor_instance_type = var.default_cluster_monitor_instance_type
- override_values = file("default-cluster.yaml")
+ cluster_name = var.default_cluster_name
+ cluster_version = var.default_cluster_version
+ ssh_key_name = module.key-pair.key_name
+ pd_count = var.default_cluster_pd_count
+ pd_instance_type = var.default_cluster_pd_instance_type
+ tikv_count = var.default_cluster_tikv_count
+ tikv_instance_type = var.default_cluster_tikv_instance_type
+ tidb_count = var.default_cluster_tidb_count
+ tidb_instance_type = var.default_cluster_tidb_instance_type
+ monitor_instance_type = var.default_cluster_monitor_instance_type
+ create_tidb_cluster_release = var.create_tidb_cluster_release
}
diff --git a/deploy/aws/manifests/db-monitor.yaml.example b/deploy/aws/manifests/db-monitor.yaml.example
new file mode 100644
index 0000000000..da607309b4
--- /dev/null
+++ b/deploy/aws/manifests/db-monitor.yaml.example
@@ -0,0 +1,84 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbMonitor
+metadata:
+ name: CLUSTER_NAME
+spec:
+ alertmanagerURL: ""
+ annotations: {}
+ clusters:
+ - name: CLUSTER_NAME
+ grafana:
+ baseImage: grafana/grafana
+ envs:
+ # Configure Grafana using environment variables except GF_PATHS_DATA, GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD
+ # Ref https://grafana.com/docs/installation/configuration/#using-environment-variables
+ GF_AUTH_ANONYMOUS_ENABLED: "true"
+ GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org."
+ GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer"
+ # if grafana is running behind a reverse proxy with subpath http://foo.bar/grafana
+ # GF_SERVER_DOMAIN: foo.bar
+ # GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/"
+ imagePullPolicy: IfNotPresent
+ logLevel: info
+ password: admin
+ resources: {}
+ # limits:
+ # cpu: 8000m
+ # memory: 8Gi
+ # requests:
+ # cpu: 4000m
+ # memory: 4Gi
+ service:
+ portName: http-grafana
+ type: LoadBalancer
+ username: admin
+ version: 6.0.1
+ imagePullPolicy: IfNotPresent
+ initializer:
+ baseImage: pingcap/tidb-monitor-initializer
+ imagePullPolicy: IfNotPresent
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 50m
+ # memory: 64Mi
+ version: v3.0.12
+ kubePrometheusURL: ""
+ nodeSelector: {}
+ persistent: true
+ prometheus:
+ baseImage: prom/prometheus
+ imagePullPolicy: IfNotPresent
+ logLevel: info
+ reserveDays: 12
+ resources: {}
+ # limits:
+ # cpu: 8000m
+ # memory: 8Gi
+ # requests:
+ # cpu: 4000m
+ # memory: 4Gi
+ service:
+ portName: http-prometheus
+ type: NodePort
+ version: v2.11.1
+ reloader:
+ baseImage: pingcap/tidb-monitor-reloader
+ imagePullPolicy: IfNotPresent
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 50m
+ # memory: 64Mi
+ service:
+ portName: tcp-reloader
+ type: NodePort
+ version: v1.0.1
+ storage: 100Gi
+ storageClassName: ebs-gp2
+ tolerations: []
+
diff --git a/deploy/aws/manifests/db.yaml.example b/deploy/aws/manifests/db.yaml.example
new file mode 100644
index 0000000000..5a4eb9c2bc
--- /dev/null
+++ b/deploy/aws/manifests/db.yaml.example
@@ -0,0 +1,108 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+ name: CLUSTER_NAME
+spec:
+ configUpdateStrategy: RollingUpdate
+ enableTLSCluster: false
+ helper:
+ image: busybox:1.31.1
+ hostNetwork: false
+ imagePullPolicy: IfNotPresent
+ pd:
+ affinity: {}
+ baseImage: pingcap/pd
+ config:
+ log:
+ level: info
+ replication:
+ location-labels:
+ - zone
+ max-replicas: 3
+ nodeSelector:
+ dedicated: CLUSTER_NAME-pd
+ podSecurityContext: {}
+ replicas: 3
+ requests:
+ cpu: "1"
+ memory: 400Mi
+ storage: 1Gi
+ storageClassName: ebs-gp2
+ tolerations:
+ - effect: NoSchedule
+ key: dedicated
+ operator: Equal
+ value: CLUSTER_NAME-pd
+ pvReclaimPolicy: Retain
+ schedulerName: tidb-scheduler
+ tidb:
+ affinity: {}
+ baseImage: pingcap/tidb
+ config:
+ log:
+ level: info
+ performance:
+ max-procs: 0
+ tcp-keep-alive: true
+ enableTLSClient: false
+ maxFailoverCount: 3
+ nodeSelector:
+ dedicated: CLUSTER_NAME-tidb
+ podSecurityContext:
+ sysctls:
+ - name: net.ipv4.tcp_keepalive_time
+ value: "300"
+ - name: net.ipv4.tcp_keepalive_intvl
+ value: "75"
+ - name: net.core.somaxconn
+ value: "32768"
+ replicas: 2
+ requests:
+ cpu: "1"
+ memory: 400Mi
+ separateSlowLog: true
+ service:
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true'
+ service.beta.kubernetes.io/aws-load-balancer-internal: '0.0.0.0/0'
+ service.beta.kubernetes.io/aws-load-balancer-type: nlb
+ exposeStatus: true
+ externalTrafficPolicy: Local
+ type: LoadBalancer
+ slowLogTailer:
+ limits:
+ cpu: 100m
+ memory: 50Mi
+ requests:
+ cpu: 20m
+ memory: 5Mi
+ tolerations:
+ - effect: NoSchedule
+ key: dedicated
+ operator: Equal
+ value: CLUSTER_NAME-tidb
+ tikv:
+ affinity: {}
+ baseImage: pingcap/tikv
+ config:
+ log-level: info
+ hostNetwork: false
+ maxFailoverCount: 3
+ nodeSelector:
+ dedicated: CLUSTER_NAME-tikv
+ podSecurityContext: {}
+ privileged: false
+ replicas: 3
+ requests:
+ cpu: "1"
+ memory: 2Gi
+ storage: 45Gi
+ storageClassName: local-storage
+ tolerations:
+ - effect: NoSchedule
+ key: dedicated
+ operator: Equal
+ value: CLUSTER_NAME-tikv
+ timezone: UTC
+ version: v3.0.12
+
diff --git a/deploy/aws/variables.tf b/deploy/aws/variables.tf
index 0ad33b44f7..7691663a5c 100644
--- a/deploy/aws/variables.tf
+++ b/deploy/aws/variables.tf
@@ -19,7 +19,7 @@ variable "eks_version" {
variable "operator_version" {
description = "TiDB operator version"
- default = "v1.0.6"
+ default = "v1.1.0"
}
variable "operator_values" {
@@ -115,3 +115,7 @@ variable "default_cluster_name" {
default = "my-cluster"
}
+variable "create_tidb_cluster_release" {
+ description = "whether creating tidb-cluster helm release"
+ default = false
+}
diff --git a/deploy/gcp/examples/multi-zonal.tfvars b/deploy/gcp/examples/multi-zonal.tfvars
index 17aa938cc0..6824c0772d 100644
--- a/deploy/gcp/examples/multi-zonal.tfvars
+++ b/deploy/gcp/examples/multi-zonal.tfvars
@@ -2,12 +2,12 @@
# This will create a zonal cluster in zone us-central1-b with one additional zone.
# Work nodes will be created in primary zone us-central1-b and additional zone us-central1-c.
#
-gke_name = "multi-zonal"
-vpc_name = "multi-zonal"
-location = "us-central1-b"
+gke_name = "multi-zonal"
+vpc_name = "multi-zonal"
+location = "us-central1-b"
pd_instance_type = "n1-standard-2"
tikv_instance_type = "n1-highmem-4"
tidb_instance_type = "n1-standard-8"
node_locations = [
- "us-central1-c"
+ "us-central1-c"
]
diff --git a/deploy/gcp/examples/single-zonal.tfvars b/deploy/gcp/examples/single-zonal.tfvars
index 4b073e6e2c..568c6c8759 100644
--- a/deploy/gcp/examples/single-zonal.tfvars
+++ b/deploy/gcp/examples/single-zonal.tfvars
@@ -2,12 +2,12 @@
# This will create a zonal cluster in zone us-central1-b without additional zones.
# Work nodes will be created in a single zone only.
#
-gke_name = "single-zonal"
-vpc_name = "single-zonal"
-location = "us-central1-b"
+gke_name = "single-zonal"
+vpc_name = "single-zonal"
+location = "us-central1-b"
pd_instance_type = "n1-standard-2"
tikv_instance_type = "n1-highmem-4"
tidb_instance_type = "n1-standard-8"
-pd_count = 3
-tikv_count = 3
-tidb_count = 3
+pd_count = 3
+tikv_count = 3
+tidb_count = 3
diff --git a/deploy/gcp/examples/tidb-customized.tfvars b/deploy/gcp/examples/tidb-customized.tfvars
deleted file mode 100644
index f2ca40b6d4..0000000000
--- a/deploy/gcp/examples/tidb-customized.tfvars
+++ /dev/null
@@ -1,25 +0,0 @@
-pd_instance_type = "n1-standard-2"
-tikv_instance_type = "n1-highmem-4"
-tidb_instance_type = "n1-standard-8"
-
-# specify tidb version
-tidb_version = "3.0.8"
-
-# override tidb cluster values
-override_values = < "${local.kubeconfig}"
+EOS
+ }
+}
+
provider "helm" {
alias = "initial"
insecure = true
# service_account = "tiller"
install_tiller = false # currently this doesn't work, so we install tiller in the local-exec provisioner. See https://github.com/terraform-providers/terraform-provider-helm/issues/148
kubernetes {
- config_path = local_file.kubeconfig.filename
+ config_path = local.kubeconfig
+ # used to delay helm provisioner initialization in the apply phase
+ load_config_file = null_resource.kubeconfig.id != "" ? true : null
}
}
diff --git a/deploy/modules/gcp/tidb-cluster/data.tf b/deploy/modules/gcp/tidb-cluster/data.tf
index d8cbd84ab8..6250b5aed0 100644
--- a/deploy/modules/gcp/tidb-cluster/data.tf
+++ b/deploy/modules/gcp/tidb-cluster/data.tf
@@ -1,21 +1,21 @@
data "external" "tidb_ilb_ip" {
depends_on = [null_resource.wait-lb-ip]
- program = ["bash", "-c", "kubectl --kubeconfig ${var.kubeconfig_path} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.status.loadBalancer.ingress[0]'"]
+ program = ["bash", "-c", local.cmd_get_tidb_ilb_ip]
}
data "external" "monitor_lb_ip" {
depends_on = [null_resource.wait-lb-ip]
- program = ["bash", "-c", "kubectl --kubeconfig ${var.kubeconfig_path} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.status.loadBalancer.ingress[0]'"]
+ program = ["bash", "-c", local.cmd_get_monitor_lb_ip]
}
data "external" "tidb_port" {
depends_on = [null_resource.wait-lb-ip]
- program = ["bash", "-c", "kubectl --kubeconfig ${var.kubeconfig_path} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json | jq '.spec.ports | .[] | select( .name == \"mysql-client\") | {port: .port|tostring}'"]
+ program = ["bash", "-c", local.cmd_get_tidb_port]
}
data "external" "monitor_port" {
depends_on = [null_resource.wait-lb-ip]
- program = ["bash", "-c", "kubectl --kubeconfig ${var.kubeconfig_path} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json | jq '.spec.ports | .[] | select( .name == \"grafana\") | {port: .port|tostring}'"]
+ program = ["bash", "-c", local.cmd_get_monitor_port]
}
locals {
@@ -24,6 +24,22 @@ locals {
# TODO Update related code when node locations is avaiable in attributes of cluster resource.
cmd_get_cluster_locations = < 0 then .[0].locations | join(",") else "" end) }'
+EOT
+ cmd_get_tidb_ilb_ip = <<EOT
+output=$(kubectl --kubeconfig ${var.kubeconfig_path} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json 2>/dev/null) || true
+jq -s '.[0].status.loadBalancer.ingress[0] // {"ip":""}' <<<"$output"
+EOT
+ cmd_get_monitor_lb_ip = <<EOT
+output=$(kubectl --kubeconfig ${var.kubeconfig_path} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json 2>/dev/null) || true
+jq -s '.[0].status.loadBalancer.ingress[0] // {"ip":""}' <<<"$output"
+EOT
+ cmd_get_tidb_port = <<EOT
+output=$(kubectl --kubeconfig ${var.kubeconfig_path} get svc -n ${var.cluster_name} ${var.cluster_name}-tidb -o json 2>/dev/null) || true
+jq -s 'try (.[0].spec.ports | .[] | select( .name == "mysql-client") | {port: .port|tostring}) catch {"port":""}' <<<"$output"
+EOT
+ cmd_get_monitor_port = <<EOT
+output=$(kubectl --kubeconfig ${var.kubeconfig_path} get svc -n ${var.cluster_name} ${var.cluster_name}-grafana -o json 2>/dev/null) || true
+jq -s 'try (.[0].spec.ports | .[] | select( .name == "grafana") | {port: .port|tostring}) catch {"port":""}' <<<"$output"
EOT
}
diff --git a/deploy/modules/gcp/tidb-cluster/main.tf b/deploy/modules/gcp/tidb-cluster/main.tf
index 7166ee7787..9aa76c3bdb 100644
--- a/deploy/modules/gcp/tidb-cluster/main.tf
+++ b/deploy/modules/gcp/tidb-cluster/main.tf
@@ -135,6 +135,7 @@ locals {
module "tidb-cluster" {
source = "../../share/tidb-cluster-release"
+ create = var.create_tidb_cluster_release
cluster_name = var.cluster_name
pd_count = var.pd_node_count * local.num_availability_zones
tikv_count = var.tikv_node_count * local.num_availability_zones
@@ -149,6 +150,7 @@ module "tidb-cluster" {
}
resource "null_resource" "wait-lb-ip" {
+ count = var.create_tidb_cluster_release == true ? 1 : 0
depends_on = [
module.tidb-cluster
]
diff --git a/deploy/modules/gcp/tidb-cluster/variables.tf b/deploy/modules/gcp/tidb-cluster/variables.tf
index db730b919d..258b667b1f 100644
--- a/deploy/modules/gcp/tidb-cluster/variables.tf
+++ b/deploy/modules/gcp/tidb-cluster/variables.tf
@@ -72,3 +72,8 @@ variable "tikv_local_ssd_count" {
description = "TiKV node pool local ssd count (cannot be changed after the node pool is created)"
default = 1
}
+
+variable "create_tidb_cluster_release" {
+ description = "Whether create tidb-cluster release in the node pools automatically"
+ default = true
+}
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
index 472cd3c611..4a7900e1dd 100644
--- a/docs/CONTRIBUTING.md
+++ b/docs/CONTRIBUTING.md
@@ -14,14 +14,6 @@ $ export GOPATH=$HOME/go
$ export PATH=$PATH:$GOPATH/bin
```
-## Dependency management
-
-TiDB Operator uses [retool](https://github.com/twitchtv/retool) to manage Go related tools.
-
-```sh
-$ go get -u github.com/twitchtv/retool
-```
-
## Workflow
### Step 1: Fork TiDB Operator on GitHub
@@ -112,6 +104,49 @@ $ make check
This will show errors if your code change does not pass checks (e.g. fmt,
lint). Please fix them before submitting the PR.
+#### Start tidb-operator locally and do manual tests
+
+We use [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) to
+start a Kubernetes cluster locally, and
+[kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) must be
+installed to access the Kubernetes cluster.
+
+You can refer to their official references to install them on your machine, or
+run the following command to install them into our local binary directory:
+`output/bin`.
+
+```
+$ hack/install-up-operator.sh -i
+$ export PATH=$(pwd)/output/bin:$PATH
+```
+
+Make sure they are installed correctly:
+
+```
+$ kind --version
+...
+$ kubectl version --client
+...
+```
+
+Create a Kubernetes cluster with `kind`:
+
+```
+$ kind create cluster
+```
+
+Build and run tidb-operator:
+
+```
+$ ./hack/local-up-operator.sh
+```
+
+Start a basic TiDB cluster:
+
+```
+$ kubectl apply -f examples/basic/tidb-cluster.yaml
+```
+
#### Run unit tests
Before running your code in a real Kubernetes cluster, make sure it passes all unit tests.
diff --git a/docs/aliyun-tutorial.md b/docs/aliyun-tutorial.md
deleted file mode 100644
index 44fdda744d..0000000000
--- a/docs/aliyun-tutorial.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Deploy on Aliyun (Alibaba Cloud)
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/deploy/alibaba-cloud/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/deploy/alibaba-cloud/).
diff --git a/docs/api-references/config.json b/docs/api-references/config.json
new file mode 100644
index 0000000000..f0888cb044
--- /dev/null
+++ b/docs/api-references/config.json
@@ -0,0 +1,64 @@
+{
+ "hideMemberFields": [
+ "TypeMeta",
+ "TmpPath",
+ "DisplayName",
+ "DefaultProfile",
+ "Path",
+ "ListenHost",
+ "TCPPort",
+ "HTTPPort",
+ "InternalServerHTTPPort",
+ "ErrorLog",
+ "ServerLog",
+ "TiDBStatusAddr",
+ "ServiceAddr",
+ "ProxyConfig",
+ "ClusterManagerPath",
+ "Flash",
+ "FlashStatus",
+ "FlashQuota",
+ "FlashUser",
+ "FlashProfile",
+ "FlashApplication",
+ "FlashProxy",
+ "FlashRaft",
+ "ClusterLog"
+ ],
+ "hideTypePatterns": [
+ "ParseError$",
+ "List$",
+ "DataResource",
+ "ProxyConfig",
+ "^Flash$",
+ "FlashCluster",
+ "FlashStatus",
+ "FlashQuota",
+ "FlashUser",
+ "FlashProfile",
+ "FlashApplication",
+ "FlashProxy",
+ "FlashServerConfig",
+ "FlashRaft"
+ ],
+ "externalPackages": [
+ {
+ "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$",
+ "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration"
+ },
+ {
+ "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/",
+ "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}"
+ },
+ {
+ "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/",
+ "docsURLTemplate": "https://godoc.org/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}"
+ }
+ ],
+ "typeDisplayNamePrefixOverrides": {
+ "k8s.io/api/": "Kubernetes ",
+ "k8s.io/apimachinery/pkg/apis/": "Kubernetes "
+ },
+ "markdownDisabled": false
+}
+
diff --git a/docs/api-references/docs.md b/docs/api-references/docs.md
new file mode 100644
index 0000000000..a676271b16
--- /dev/null
+++ b/docs/api-references/docs.md
@@ -0,0 +1,14277 @@
+---
+title: TiDB Operator API Document
+summary: Reference of TiDB Operator API
+category: how-to
+---
+API Document
+Packages:
+
+pingcap.com/v1alpha1
+
+
Package v1alpha1 is the v1alpha1 version of the API.
+
+Resource Types:
+
+Backup
+
+
+
Backup is a backup of tidb cluster.
+
+
+
+
+Field
+Description
+
+
+
+
+
+apiVersion
+string
+
+
+pingcap.com/v1alpha1
+
+
+
+
+
+kind
+string
+
+Backup
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata
field.
+
+
+
+
+spec
+
+
+BackupSpec
+
+
+
+
+
+
+
+
+
+from
+
+
+TiDBAccessConfig
+
+
+
+
+From is the tidb cluster that needs to backup.
+
+
+
+
+backupType
+
+
+BackupType
+
+
+
+
+Type is the backup type for tidb cluster.
+
+
+
+
+tikvGCLifeTime
+
+string
+
+
+
+TikvGCLifeTime is to specify the safe gc life time for backup.
+The time limit during which data is retained for each GC, in the format of Go Duration.
+When a GC happens, the current time minus this value is the safe point.
+
+
+
+
+StorageProvider
+
+
+StorageProvider
+
+
+
+
+
+(Members of StorageProvider
are embedded into this type.)
+
+StorageProvider configures where and how backups should be stored.
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for Backup data storage.
+Defaults to Kubernetes default storage class.
+
+
+
+
+storageSize
+
+string
+
+
+
+StorageSize is the request storage size for backup job
+
+
+
+
+br
+
+
+BRConfig
+
+
+
+
+BRConfig is the configs for BR
+
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+
+
+(Optional)
+Base tolerations of backup Pods, components may add more tolerations upon this respectively
+
+
+
+
+affinity
+
+
+Kubernetes core/v1.Affinity
+
+
+
+
+(Optional)
+Affinity of backup Pods
+
+
+
+
+useKMS
+
+bool
+
+
+
+Use KMS to decrypt the secrets
+
+
+
+
+serviceAccount
+
+string
+
+
+
+Specify service account of backup
+
+
+
+
+
+
+
+status
+
+
+BackupStatus
+
+
+
+
+
+
+
+
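For orientation, a minimal `Backup` manifest assembled from the fields above might look like the sketch below. The `from` block assumes `TiDBAccessConfig` exposes `host`/`port`/`user`/`secretName`, and the cluster, secret, and GCS names are purely illustrative:

```yaml
apiVersion: pingcap.com/v1alpha1
kind: Backup
metadata:
  name: demo-backup
spec:
  from:                          # assumed TiDBAccessConfig fields
    host: demo-tidb
    port: 4000
    user: root
    secretName: backup-demo-secret
  backupType: full
  br:                            # BRConfig, documented further below
    cluster: demo
    clusterNamespace: default
  gcs:                           # one embedded StorageProvider backend
    projectId: my-project
    bucket: my-backup-bucket
    secretName: gcs-secret
```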
+BackupSchedule
+
+
+
BackupSchedule is a backup schedule of tidb cluster.
+
+
+
+
+Field
+Description
+
+
+
+
+
+apiVersion
+string
+
+
+pingcap.com/v1alpha1
+
+
+
+
+
+kind
+string
+
+BackupSchedule
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata
field.
+
+
+
+
+spec
+
+
+BackupScheduleSpec
+
+
+
+
+
+
+
+
+
+schedule
+
+string
+
+
+
+Schedule specifies the cron string used for backup scheduling.
+
+
+
+
+pause
+
+bool
+
+
+
+Pause means paused backupSchedule
+
+
+
+
+maxBackups
+
+int32
+
+
+
+MaxBackups is to specify how many backups we want to keep
+0 is magic number to indicate un-limited backups.
+
+
+
+
+maxReservedTime
+
+string
+
+
+
+MaxReservedTime is to specify how long backups we want to keep.
+
+
+
+
+backupTemplate
+
+
+BackupSpec
+
+
+
+
+BackupTemplate is the specification of the backup structure to get scheduled.
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for Backup data storage if not storage class name set in BackupSpec.
+Defaults to Kubernetes default storage class.
+
+
+
+
+storageSize
+
+string
+
+
+
+StorageSize is the request storage size for backup job
+
+
+
+
+
+
+
+status
+
+
+BackupScheduleStatus
+
+
+
+
+
+
+
+
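A hedged sketch of a `BackupSchedule` using the fields above; the nested `backupTemplate` reuses the illustrative `Backup` spec from the previous example, and all names and values are assumptions:

```yaml
apiVersion: pingcap.com/v1alpha1
kind: BackupSchedule
metadata:
  name: demo-backup-schedule
spec:
  schedule: "0 0 * * *"          # cron string, here daily at midnight
  maxBackups: 5                  # 0 would keep an unlimited number of backups
  maxReservedTime: "72h"
  backupTemplate:
    from:
      host: demo-tidb
      port: 4000
      user: root
      secretName: backup-demo-secret
    gcs:
      projectId: my-project
      bucket: my-backup-bucket
      secretName: gcs-secret
```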
+Restore
+
+
+
Restore represents the restoration of backup of a tidb cluster.
+
+
+
+
+Field
+Description
+
+
+
+
+
+apiVersion
+string
+
+
+pingcap.com/v1alpha1
+
+
+
+
+
+kind
+string
+
+Restore
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata
field.
+
+
+
+
+spec
+
+
+RestoreSpec
+
+
+
+
+
+
+
+
+
+to
+
+
+TiDBAccessConfig
+
+
+
+
+To is the tidb cluster that needs to restore.
+
+
+
+
+backupType
+
+
+BackupType
+
+
+
+
+Type is the backup type for tidb cluster.
+
+
+
+
+tikvGCLifeTime
+
+string
+
+
+
+TikvGCLifeTime is to specify the safe gc life time for restore.
+The time limit during which data is retained for each GC, in the format of Go Duration.
+When a GC happens, the current time minus this value is the safe point.
+
+
+
+
+StorageProvider
+
+
+StorageProvider
+
+
+
+
+
+(Members of StorageProvider
are embedded into this type.)
+
+StorageProvider configures where and how backups should be stored.
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for Restore data storage.
+Defaults to Kubernetes default storage class.
+
+
+
+
+storageSize
+
+string
+
+
+
+StorageSize is the request storage size for backup job
+
+
+
+
+br
+
+
+BRConfig
+
+
+
+
+BR is the configs for BR.
+
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+
+
+(Optional)
+Base tolerations of restore Pods, components may add more tolerations upon this respectively
+
+
+
+
+affinity
+
+
+Kubernetes core/v1.Affinity
+
+
+
+
+(Optional)
+Affinity of restore Pods
+
+
+
+
+useKMS
+
+bool
+
+
+
+Use KMS to decrypt the secrets
+
+
+
+
+serviceAccount
+
+string
+
+
+
+Specify service account of restore
+
+
+
+
+
+
+
+status
+
+
+RestoreStatus
+
+
+
+
+
+
+
+
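A minimal `Restore` sketch mirroring the `Backup` example earlier; as before, the `to` block assumes `TiDBAccessConfig` fields and every name is illustrative:

```yaml
apiVersion: pingcap.com/v1alpha1
kind: Restore
metadata:
  name: demo-restore
spec:
  to:
    host: demo-tidb
    port: 4000
    user: root
    secretName: restore-demo-secret
  backupType: full
  br:
    cluster: demo
    clusterNamespace: default
  gcs:
    projectId: my-project
    bucket: my-backup-bucket
    secretName: gcs-secret
```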
+TidbCluster
+
+
+
TidbCluster is the control script’s spec
+
+
+
+
+Field
+Description
+
+
+
+
+
+apiVersion
+string
+
+
+pingcap.com/v1alpha1
+
+
+
+
+
+kind
+string
+
+TidbCluster
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata
field.
+
+
+
+
+spec
+
+
+TidbClusterSpec
+
+
+
+
+Spec defines the behavior of a tidb cluster
+
+
+
+
+
+pd
+
+
+PDSpec
+
+
+
+
+PD cluster spec
+
+
+
+
+tidb
+
+
+TiDBSpec
+
+
+
+
+TiDB cluster spec
+
+
+
+
+tikv
+
+
+TiKVSpec
+
+
+
+
+TiKV cluster spec
+
+
+
+
+tiflash
+
+
+TiFlashSpec
+
+
+
+
+(Optional)
+TiFlash cluster spec
+
+
+
+
+pump
+
+
+PumpSpec
+
+
+
+
+(Optional)
+Pump cluster spec
+
+
+
+
+helper
+
+
+HelperSpec
+
+
+
+
+(Optional)
+Helper spec
+
+
+
+
+paused
+
+bool
+
+
+
+(Optional)
+Indicates that the tidb cluster is paused and will not be processed by
+the controller.
+
+
+
+
+version
+
+string
+
+
+
+(Optional)
+TODO: remove optional after defaulting logic introduced
+TiDB cluster version
+
+
+
+
+schedulerName
+
+string
+
+
+
+SchedulerName of TiDB cluster Pods
+
+
+
+
+pvReclaimPolicy
+
+
+Kubernetes core/v1.PersistentVolumeReclaimPolicy
+
+
+
+
+Persistent volume reclaim policy applied to the PVs that consumed by TiDB cluster
+
+
+
+
+imagePullPolicy
+
+
+Kubernetes core/v1.PullPolicy
+
+
+
+
+ImagePullPolicy of TiDB cluster Pods
+
+
+
+
+configUpdateStrategy
+
+
+ConfigUpdateStrategy
+
+
+
+
+ConfigUpdateStrategy determines how the configuration change is applied to the cluster.
+UpdateStrategyInPlace will update the ConfigMap of configuration in-place and an extra rolling-update of the
+cluster component is needed to reload the configuration change.
+UpdateStrategyRollingUpdate will create a new ConfigMap with the new configuration and rolling-update the
+related components to use the new ConfigMap, that is, the new configuration will be applied automatically.
+
+
+
+
+enablePVReclaim
+
+bool
+
+
+
+(Optional)
+Whether enable PVC reclaim for orphan PVC left by statefulset scale-in
+Optional: Defaults to false
+
+
+
+
+tlsCluster
+
+
+TLSCluster
+
+
+
+
+(Optional)
+Whether enable the TLS connection between TiDB server components
+Optional: Defaults to nil
+
+
+
+
+hostNetwork
+
+bool
+
+
+
+(Optional)
+Whether Hostnetwork is enabled for TiDB cluster Pods
+Optional: Defaults to false
+
+
+
+
+affinity
+
+
+Kubernetes core/v1.Affinity
+
+
+
+
+(Optional)
+Affinity of TiDB cluster Pods
+
+
+
+
+priorityClassName
+
+string
+
+
+
+(Optional)
+PriorityClassName of TiDB cluster Pods
+Optional: Defaults to omitted
+
+
+
+
+nodeSelector
+
+map[string]string
+
+
+
+(Optional)
+Base node selectors of TiDB cluster Pods, components may add or override selectors upon this respectively
+
+
+
+
+annotations
+
+map[string]string
+
+
+
+(Optional)
+Base annotations of TiDB cluster Pods, components may add or override selectors upon this respectively
+
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+
+
+(Optional)
+Base tolerations of TiDB cluster Pods, components may add more tolerations upon this respectively
+
+
+
+
+timezone
+
+string
+
+
+
+(Optional)
+Time zone of TiDB cluster Pods
+Optional: Defaults to UTC
+
+
+
+
+services
+
+
+[]Service
+
+
+
+
+Services list non-headless services type used in TidbCluster
+Deprecated
+
+
+
+
+
+
+
+status
+
+
+TidbClusterStatus
+
+
+
+
+Most recently observed status of the tidb cluster
+
+
+
+
+TidbClusterAutoScaler
+
+
+
TidbClusterAutoScaler is the control script’s spec
+
+
+
+
+Field
+Description
+
+
+
+
+
+apiVersion
+string
+
+
+pingcap.com/v1alpha1
+
+
+
+
+
+kind
+string
+
+TidbClusterAutoScaler
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata
field.
+
+
+
+
+spec
+
+
+TidbClusterAutoScalerSpec
+
+
+
+
+Spec describes the state of the TidbClusterAutoScaler
+
+
+
+
+
+cluster
+
+
+TidbClusterRef
+
+
+
+
+TidbClusterRef describe the target TidbCluster
+
+
+
+
+metricsUrl
+
+string
+
+
+
+(Optional)
+Prometheus is used to fetch the metrics until PD can provide them itself.
+MetricsUrl represents the URL used to fetch the metrics info.
+
+
+
+
+monitor
+
+
+TidbMonitorRef
+
+
+
+
+(Optional)
+TidbMonitorRef describe the target TidbMonitor, when MetricsUrl and Monitor are both set,
+Operator will use MetricsUrl
+
+
+
+
+tikv
+
+
+TikvAutoScalerSpec
+
+
+
+
+(Optional)
+TiKV represents the auto-scaling spec for tikv
+
+
+
+
+tidb
+
+
+TidbAutoScalerSpec
+
+
+
+
+(Optional)
+TiDB represents the auto-scaling spec for tidb
+
+
+
+
+
+
+
+status
+
+
+TidbClusterAutoSclaerStatus
+
+
+
+
+Status describe the status of the TidbClusterAutoScaler
+
+
+
+
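A minimal `TidbClusterAutoScaler` sketch based on the fields above; it assumes `TidbClusterRef` and `TidbMonitorRef` take `name`/`namespace`, and the replica bounds come from `BasicAutoScalerSpec` (documented further below):

```yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbClusterAutoScaler
metadata:
  name: demo-auto-scaler
spec:
  cluster:
    name: demo                   # target TidbCluster
    namespace: default
  monitor:
    name: demo                   # TidbMonitor used as the metrics source
    namespace: default
  tikv:
    minReplicas: 3
    maxReplicas: 5
  tidb:
    minReplicas: 2
    maxReplicas: 4
```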
+TidbInitializer
+
+
+
TidbInitializer is a TiDB cluster initializing job
+
+
+
+
+Field
+Description
+
+
+
+
+
+apiVersion
+string
+
+
+pingcap.com/v1alpha1
+
+
+
+
+
+kind
+string
+
+TidbInitializer
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata
field.
+
+
+
+
+spec
+
+
+TidbInitializerSpec
+
+
+
+
+Spec defines the desired state of TidbInitializer
+
+
+
+
+
+image
+
+string
+
+
+
+
+
+
+
+cluster
+
+
+TidbClusterRef
+
+
+
+
+
+
+
+
+imagePullPolicy
+
+
+Kubernetes core/v1.PullPolicy
+
+
+
+
+(Optional)
+
+
+
+
+permitHost
+
+string
+
+
+
+(Optional)
+permitHost is the host which will only be allowed to connect to the TiDB.
+
+
+
+
+initSql
+
+string
+
+
+
+(Optional)
+InitSql is the SQL statements executed after the TiDB cluster is bootstrapped.
+
+
+
+
+initSqlConfigMap
+
+string
+
+
+
+(Optional)
+InitSqlConfigMapName reference a configmap that provide init-sql, take high precedence than initSql if set
+
+
+
+
+passwordSecret
+
+string
+
+
+
+(Optional)
+
+
+
+
+resources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+
+
+(Optional)
+
+
+
+
+timezone
+
+string
+
+
+
+(Optional)
+Time zone of TiDB initializer Pods
+
+
+
+
+
+
+
+status
+
+
+TidbInitializerStatus
+
+
+
+
+Most recently observed status of the TidbInitializer
+
+
+
+
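A minimal `TidbInitializer` sketch using the fields above; the image, SQL statement, and secret name are illustrative assumptions:

```yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbInitializer
metadata:
  name: demo-init
spec:
  image: tnir/mysqlclient        # illustrative client image
  cluster:
    name: demo                   # target TidbCluster
  initSql: "CREATE DATABASE app;"
  passwordSecret: tidb-secret    # illustrative secret name
```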
+TidbMonitor
+
+
+
TidbMonitor encode the spec and status of the monitoring component of a TiDB cluster
+
+
+
+
+Field
+Description
+
+
+
+
+
+apiVersion
+string
+
+
+pingcap.com/v1alpha1
+
+
+
+
+
+kind
+string
+
+TidbMonitor
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata
field.
+
+
+
+
+spec
+
+
+TidbMonitorSpec
+
+
+
+
+Spec defines the desired state of TidbMonitor
+
+
+
+
+
+
+
+status
+
+
+TidbMonitorStatus
+
+
+
+
+Most recently observed status of the TidbMonitor
+
+
+
+
+BRConfig
+
+
+(Appears on:
+BackupSpec ,
+RestoreSpec )
+
+
+
BRConfig contains config for BR
+
+
+
+
+Field
+Description
+
+
+
+
+
+cluster
+
+string
+
+
+
+ClusterName of backup/restore cluster
+
+
+
+
+clusterNamespace
+
+string
+
+
+
+Namespace of backup/restore cluster
+
+
+
+
+db
+
+string
+
+
+
+DB is the specific DB which will be backed-up or restored
+
+
+
+
+table
+
+string
+
+
+
+Table is the specific table which will be backed-up or restored
+
+
+
+
+logLevel
+
+string
+
+
+
+LogLevel is the log level
+
+
+
+
+statusAddr
+
+string
+
+
+
+StatusAddr is the HTTP listening address for the status report service. Set to empty string to disable
+
+
+
+
+concurrency
+
+uint32
+
+
+
+Concurrency is the size of thread pool on each node that execute the backup task
+
+
+
+
+rateLimit
+
+uint
+
+
+
+RateLimit is the rate limit of the backup task, MB/s per node
+
+
+
+
+timeAgo
+
+string
+
+
+
+TimeAgo is the history version of the backup task, e.g. 1m, 1h
+
+
+
+
+checksum
+
+bool
+
+
+
+Checksum specifies whether to run checksum after backup
+
+
+
+
+sendCredToTikv
+
+bool
+
+
+
+SendCredToTikv specifies whether to send credentials to TiKV
+
+
+
+
+onLine
+
+bool
+
+
+
+OnLine specifies whether online during restore
+
+
+
+
+BackupCondition
+
+
+(Appears on:
+BackupStatus )
+
+
+
BackupCondition describes the observed state of a Backup at a certain point.
+
+
+BackupConditionType
+(string
alias)
+
+(Appears on:
+BackupCondition )
+
+
+
BackupConditionType represents a valid condition of a Backup.
+
+BackupScheduleSpec
+
+
+(Appears on:
+BackupSchedule )
+
+
+
BackupScheduleSpec contains the backup schedule specification for a tidb cluster.
+
+
+
+
+Field
+Description
+
+
+
+
+
+schedule
+
+string
+
+
+
+Schedule specifies the cron string used for backup scheduling.
+
+
+
+
+pause
+
+bool
+
+
+
+Pause means paused backupSchedule
+
+
+
+
+maxBackups
+
+int32
+
+
+
+MaxBackups is to specify how many backups we want to keep
+0 is magic number to indicate un-limited backups.
+
+
+
+
+maxReservedTime
+
+string
+
+
+
+MaxReservedTime is to specify how long backups we want to keep.
+
+
+
+
+backupTemplate
+
+
+BackupSpec
+
+
+
+
+BackupTemplate is the specification of the backup structure to get scheduled.
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for Backup data storage if not storage class name set in BackupSpec.
+Defaults to Kubernetes default storage class.
+
+
+
+
+storageSize
+
+string
+
+
+
+StorageSize is the request storage size for backup job
+
+
+
+
+BackupScheduleStatus
+
+
+(Appears on:
+BackupSchedule )
+
+
+
BackupScheduleStatus represents the current state of a BackupSchedule.
+
+
+
+
+Field
+Description
+
+
+
+
+
+lastBackup
+
+string
+
+
+
+LastBackup represents the last backup.
+
+
+
+
+lastBackupTime
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+LastBackupTime represents the last time the backup was successfully created.
+
+
+
+
+allBackupCleanTime
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+AllBackupCleanTime represents the time when all backup entries are cleaned up
+
+
+
+
+BackupSpec
+
+
+(Appears on:
+Backup ,
+BackupScheduleSpec )
+
+
+
BackupSpec contains the backup specification for a tidb cluster.
+
+
+
+
+Field
+Description
+
+
+
+
+
+from
+
+
+TiDBAccessConfig
+
+
+
+
+From is the tidb cluster that needs to backup.
+
+
+
+
+backupType
+
+
+BackupType
+
+
+
+
+Type is the backup type for tidb cluster.
+
+
+
+
+tikvGCLifeTime
+
+string
+
+
+
+TikvGCLifeTime is to specify the safe gc life time for backup.
+The time limit during which data is retained for each GC, in the format of Go Duration.
+When a GC happens, the current time minus this value is the safe point.
+
+
+
+
+StorageProvider
+
+
+StorageProvider
+
+
+
+
+
+(Members of StorageProvider
are embedded into this type.)
+
+StorageProvider configures where and how backups should be stored.
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for Backup data storage.
+Defaults to Kubernetes default storage class.
+
+
+
+
+storageSize
+
+string
+
+
+
+StorageSize is the request storage size for backup job
+
+
+
+
+br
+
+
+BRConfig
+
+
+
+
+BRConfig is the configs for BR
+
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+
+
+(Optional)
+Base tolerations of backup Pods, components may add more tolerations upon this respectively
+
+
+
+
+affinity
+
+
+Kubernetes core/v1.Affinity
+
+
+
+
+(Optional)
+Affinity of backup Pods
+
+
+
+
+useKMS
+
+bool
+
+
+
+Use KMS to decrypt the secrets
+
+
+
+
+serviceAccount
+
+string
+
+
+
+Specify service account of backup
+
+
+
+
+BackupStatus
+
+
+(Appears on:
+Backup )
+
+
+
BackupStatus represents the current status of a backup.
+
+
+
+
+Field
+Description
+
+
+
+
+
+backupPath
+
+string
+
+
+
+BackupPath is the location of the backup.
+
+
+
+
+timeStarted
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+TimeStarted is the time at which the backup was started.
+
+
+
+
+timeCompleted
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+TimeCompleted is the time at which the backup was completed.
+
+
+
+
+backupSize
+
+int64
+
+
+
+BackupSize is the data size of the backup.
+
+
+
+
+commitTs
+
+string
+
+
+
+CommitTs is the snapshot time point of tidb cluster.
+
+
+
+
+conditions
+
+
+[]BackupCondition
+
+
+
+
+
+
+
+
+BackupStorageType
+(string
alias)
+
+
BackupStorageType represents the backend storage type of backup.
+
+BackupType
+(string
alias)
+
+(Appears on:
+BackupSpec ,
+RestoreSpec )
+
+
+
BackupType represents the backup type.
+
+BasicAutoScalerSpec
+
+
+(Appears on:
+TidbAutoScalerSpec ,
+TikvAutoScalerSpec )
+
+
+
BasicAutoScalerSpec describes the basic spec for auto-scaling
+
+
+
+
+Field
+Description
+
+
+
+
+
+maxReplicas
+
+int32
+
+
+
+maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale out.
+It cannot be less than minReplicas.
+
+
+
+
+minReplicas
+
+int32
+
+
+
+(Optional)
+minReplicas is the lower limit for the number of replicas to which the autoscaler
+can scale down. It defaults to 1 pod. Scaling is active as long as at least one metric value is
+available.
+
+
+
+
+scaleInIntervalSeconds
+
+int32
+
+
+
+(Optional)
+ScaleInIntervalSeconds represents the duration seconds between each auto-scaling-in
+If not set, the default ScaleInIntervalSeconds will be set to 500
+
+
+
+
+scaleOutIntervalSeconds
+
+int32
+
+
+
+(Optional)
+ScaleOutIntervalSeconds represents the duration seconds between each auto-scaling-out
+If not set, the default ScaleOutIntervalSeconds will be set to 300
+
+
+
+
+metrics
+
+
+[]Kubernetes autoscaling/v2beta2.MetricSpec
+
+
+
+
+(Optional)
+metrics contains the specifications for which to use to calculate the
+desired replica count (the maximum replica count across all metrics will
+be used). The desired replica count is calculated multiplying the
+ratio between the target value and the current value by the current
+number of pods. Ergo, metrics used must decrease as the pod count is
+increased, and vice-versa. See the individual metric source types for
+more information about how each type of metric must respond.
+If not set, the default metric will be set to 80% average CPU utilization.
+
+
+
+
+metricsTimeDuration
+
+string
+
+
+
+(Optional)
+MetricsTimeDuration describe the Time duration to be queried in the Prometheus
+
+
+
+
+scaleOutThreshold
+
+int32
+
+
+
+(Optional)
+ScaleOutThreshold describe the consecutive threshold for the auto-scaling,
+if the consecutive counts of the scale-out result in auto-scaling reach this number,
+the auto-scaling would be performed.
+If not set, the default value is 3.
+
+
+
+
+scaleInThreshold
+
+int32
+
+
+
+(Optional)
+ScaleInThreshold describe the consecutive threshold for the auto-scaling,
+if the consecutive counts of the scale-in result in auto-scaling reach this number,
+the auto-scaling would be performed.
+If not set, the default value is 5.
+
+
+
+
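To make the knobs above concrete, here is a hedged sketch of how they might be set under `spec.tikv` of a `TidbClusterAutoScaler`; the metric entry follows Kubernetes `autoscaling/v2beta2`, and the 80% CPU target matches the documented default:

```yaml
tikv:
  minReplicas: 3
  maxReplicas: 6
  scaleOutIntervalSeconds: 300   # documented default
  scaleInIntervalSeconds: 500    # documented default
  scaleOutThreshold: 3
  scaleInThreshold: 5
  metricsTimeDuration: "3m"
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80   # default metric is 80% average CPU utilization
```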
+BasicAutoScalerStatus
+
+
+(Appears on:
+TidbAutoScalerStatus ,
+TikvAutoScalerStatus )
+
+
+
BasicAutoScalerStatus describe the basic auto-scaling status
+
+
+
+
+Field
+Description
+
+
+
+
+
+metrics
+
+
+[]MetricsStatus
+
+
+
+
+(Optional)
+MetricsStatusList describes the metrics status in the last auto-scaling reconciliation
+
+
+
+
+currentReplicas
+
+int32
+
+
+
+CurrentReplicas describes the current replicas for the component(tidb/tikv)
+
+
+
+
+recommendedReplicas
+
+int32
+
+
+
+(Optional)
+RecommendedReplicas describes the calculated replicas in the last auto-scaling reconciliation for the component(tidb/tikv)
+
+
+
+
+lastAutoScalingTimestamp
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+(Optional)
+LastAutoScalingTimestamp describes the last auto-scaling timestamp for the component(tidb/tikv)
+
+
+
+
+Binlog
+
+
+(Appears on:
+TiDBConfig )
+
+
+
Binlog is the config for binlog.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enable
+
+bool
+
+
+
+optional
+
+
+
+
+write-timeout
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 15s
+
+
+
+
+ignore-error
+
+bool
+
+
+
+(Optional)
+If IgnoreError is true, TiDB ignores any error encountered while
+writing binlog.
+
+
+
+
+binlog-socket
+
+string
+
+
+
+(Optional)
+Use a socket file to write binlog, for compatibility with the kafka version of tidb-binlog.
+
+
+
+
+strategy
+
+string
+
+
+
+(Optional)
+The strategy for sending binlog to pump, value can be “range,omitempty” or “hash,omitempty” now.
+Optional: Defaults to range
+
+
+
+
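An illustrative placement of this section under `spec.tidb.config` of a `TidbCluster`; the nesting is an assumption based on how TiDB config sections are exposed, and the defaults shown repeat the descriptions above:

```yaml
tidb:
  config:
    binlog:
      enable: true
      write-timeout: "15s"       # documented default
      ignore-error: true
      strategy: range            # documented default
```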
+CommonConfig
+
+
+(Appears on:
+TiFlashConfig )
+
+
+
CommonConfig is the configuration of TiFlash process.
+
+
+
+
+Field
+Description
+
+
+
+
+
+path_realtime_mode
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to false
+
+
+
+
+mark_cache_size
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 5368709120
+
+
+
+
+minmax_index_cache_size
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 5368709120
+
+
+
+
+loger
+
+
+FlashLogger
+
+
+
+
+(Optional)
+
+
+
+
+ComponentAccessor
+
+
+
ComponentAccessor is the interface to access component details, which respects the cluster-level properties
+and component-level overrides
+
+ComponentSpec
+
+
+(Appears on:
+PDSpec ,
+PumpSpec ,
+TiDBSpec ,
+TiFlashSpec ,
+TiKVSpec )
+
+
+
ComponentSpec is the base spec of each component, the fields should always accessed by the BasicSpec() method to respect the cluster-level properties
+
+
+
+
+Field
+Description
+
+
+
+
+
+image
+
+string
+
+
+
+Image of the component, override baseImage and version if present
+Deprecated
+
+
+
+
+version
+
+string
+
+
+
+(Optional)
+Version of the component. Override the cluster-level version if non-empty
+Optional: Defaults to cluster-level setting
+
+
+
+
+imagePullPolicy
+
+
+Kubernetes core/v1.PullPolicy
+
+
+
+
+(Optional)
+ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present
+Optional: Defaults to cluster-level setting
+
+
+
+
+hostNetwork
+
+bool
+
+
+
+(Optional)
+Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present
+Optional: Defaults to cluster-level setting
+
+
+
+
+affinity
+
+
+Kubernetes core/v1.Affinity
+
+
+
+
+(Optional)
+Affinity of the component. Override the cluster-level one if present
+Optional: Defaults to cluster-level setting
+
+
+
+
+priorityClassName
+
+string
+
+
+
+(Optional)
+PriorityClassName of the component. Override the cluster-level one if present
+Optional: Defaults to cluster-level setting
+
+
+
+
+schedulerName
+
+string
+
+
+
+(Optional)
+SchedulerName of the component. Override the cluster-level one if present
+Optional: Defaults to cluster-level setting
+
+
+
+
+nodeSelector
+
+map[string]string
+
+
+
+(Optional)
+NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty
+Optional: Defaults to cluster-level setting
+
+
+
+
+annotations
+
+map[string]string
+
+
+
+(Optional)
+Annotations of the component. Merged into the cluster-level annotations if non-empty
+Optional: Defaults to cluster-level setting
+
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+
+
+(Optional)
+Tolerations of the component. Override the cluster-level tolerations if non-empty
+Optional: Defaults to cluster-level setting
+
+
+
+
+podSecurityContext
+
+
+Kubernetes core/v1.PodSecurityContext
+
+
+
+
+(Optional)
+PodSecurityContext of the component
+
+
+
+
+configUpdateStrategy
+
+
+ConfigUpdateStrategy
+
+
+
+
+(Optional)
+ConfigUpdateStrategy of the component. Override the cluster-level updateStrategy if present
+Optional: Defaults to cluster-level setting
+
+
+
+
+env
+
+
+[]Kubernetes core/v1.EnvVar
+
+
+
+
+(Optional)
+List of environment variables to set in the container, like
+v1.Container.Env.
+Note that the following env names cannot be used and may be overridden by
+envs built in by tidb-operator.
+- NAMESPACE
+- TZ
+- SERVICE_NAME
+- PEER_SERVICE_NAME
+- HEADLESS_SERVICE_NAME
+- SET_NAME
+- HOSTNAME
+- CLUSTER_NAME
+- POD_NAME
+- BINLOG_ENABLED
+- SLOW_LOG_FILE
+
+
+
+
+ConfigUpdateStrategy
+(string
alias)
+
+(Appears on:
+ComponentSpec ,
+TidbClusterSpec )
+
+
+
ConfigUpdateStrategy represents the strategy to update configuration
+
+CoprocessorCache
+
+
+(Appears on:
+TiKVClient )
+
+
+
CoprocessorCache is the config for coprocessor cache.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enabled
+
+bool
+
+
+
+(Optional)
+Whether to enable the copr cache. The copr cache saves the result from TiKV Coprocessor in the memory and
+reuses the result when corresponding data in TiKV is unchanged, on a region basis.
+
+
+
+
+capacity-mb
+
+float64
+
+
+
+(Optional)
+The capacity in MB of the cache.
+
+
+
+
+admission-max-result-mb
+
+float64
+
+
+
+(Optional)
+Only cache requests whose result set is small.
+
+
+
+
+admission-min-process-ms
+
+uint64
+
+
+
+(Optional)
+Only cache requests takes notable time to process.
+
+
+
+
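A hedged sketch of where these keys might live, assuming the coprocessor cache is configured under `tikv-client` in the TiDB config; the values are illustrative:

```yaml
tidb:
  config:
    tikv-client:
      copr-cache:
        enabled: true
        capacity-mb: 1000.0
        admission-max-result-mb: 10.0
        admission-min-process-ms: 5
```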
+CrdKind
+
+
+(Appears on:
+CrdKinds )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+Kind
+
+string
+
+
+
+
+
+
+
+Plural
+
+string
+
+
+
+
+
+
+
+SpecName
+
+string
+
+
+
+
+
+
+
+ShortNames
+
+[]string
+
+
+
+
+
+
+
+AdditionalPrinterColums
+
+[]k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.CustomResourceColumnDefinition
+
+
+
+
+
+
+
+CrdKinds
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+KindsString
+
+string
+
+
+
+
+
+
+
+TiDBCluster
+
+
+CrdKind
+
+
+
+
+
+
+
+
+Backup
+
+
+CrdKind
+
+
+
+
+
+
+
+
+Restore
+
+
+CrdKind
+
+
+
+
+
+
+
+
+BackupSchedule
+
+
+CrdKind
+
+
+
+
+
+
+
+
+TiDBMonitor
+
+
+CrdKind
+
+
+
+
+
+
+
+
+TiDBInitializer
+
+
+CrdKind
+
+
+
+
+
+
+
+
+TidbClusterAutoScaler
+
+
+CrdKind
+
+
+
+
+
+
+
+
+DashboardConfig
+
+
+(Appears on:
+PDConfig )
+
+
+
DashboardConfig is the configuration for tidb-dashboard.
+
+
+
+
+Field
+Description
+
+
+
+
+
+tidb_cacert_path
+
+string
+
+
+
+
+
+
+
+tidb_cert_path
+
+string
+
+
+
+
+
+
+
+tidb_key_path
+
+string
+
+
+
+
+
+
+
+Experimental
+
+
+(Appears on:
+TiDBConfig )
+
+
+
Experimental controls the features that are still experimental: their semantics, interfaces are subject to change.
+Using these features in the production environment is not recommended.
+
+
+
+
+Field
+Description
+
+
+
+
+
+allow-auto-random
+
+bool
+
+
+
+(Optional)
+Whether enable the syntax like auto_random(3)
on the primary key column.
+imported from TiDB v3.1.0
+
+
+
+
+FileLogConfig
+
+
+(Appears on:
+Log ,
+PDLogConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+filename
+
+string
+
+
+
+(Optional)
+Log filename, leave empty to disable file log.
+
+
+
+
+log-rotate
+
+bool
+
+
+
+(Optional)
+Is log rotate enabled.
+
+
+
+
+max-size
+
+int
+
+
+
+(Optional)
+Max size for a single file, in MB.
+
+
+
+
+max-days
+
+int
+
+
+
+(Optional)
+Max log keep days, default is never deleting.
+
+
+
+
+max-backups
+
+int
+
+
+
+(Optional)
+Maximum number of old log files to retain.
+
+
+
+
+Flash
+
+
+(Appears on:
+CommonConfig )
+
+
+
Flash is the configuration of [flash] section.
+
+
+
+
+Field
+Description
+
+
+
+
+
+overlap_threshold
+
+float64
+
+
+
+(Optional)
+Optional: Defaults to 0.6
+
+
+
+
+compact_log_min_period
+
+int32
+
+
+
+(Optional)
+Optional: Defaults to 200
+
+
+
+
+flash_cluster
+
+
+FlashCluster
+
+
+
+
+(Optional)
+
+
+
+
+FlashLogger
+
+
+(Appears on:
+CommonConfig )
+
+
+
FlashLogger is the configuration of [logger] section.
+
+
+
+
+Field
+Description
+
+
+
+
+
+size
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 100M
+
+
+
+
+level
+
+string
+
+
+
+(Optional)
+Optional: Defaults to information
+
+
+
+
+count
+
+int32
+
+
+
+(Optional)
+Optional: Defaults to 10
+
+
+
+
+GcsStorageProvider
+
+
+(Appears on:
+StorageProvider )
+
+
+
GcsStorageProvider represents the google cloud storage for storing backups.
+
+
+
+
+Field
+Description
+
+
+
+
+
+projectId
+
+string
+
+
+
+ProjectId represents the project that organizes all your Google Cloud Platform resources
+
+
+
+
+location
+
+string
+
+
+
+Location in which the gcs bucket is located.
+
+
+
+
+bucket
+
+string
+
+
+
+Bucket in which to store the backup data.
+
+
+
+
+storageClass
+
+string
+
+
+
+StorageClass represents the storage class
+
+
+
+
+objectAcl
+
+string
+
+
+
+ObjectAcl represents the access control list for new objects
+
+
+
+
+bucketAcl
+
+string
+
+
+
+BucketAcl represents the access control list for new buckets
+
+
+
+
+secretName
+
+string
+
+
+
+SecretName is the name of secret which stores the
+gcs service account credentials JSON .
+
+
+
+
+GrafanaSpec
+
+
+(Appears on:
+TidbMonitorSpec )
+
+
+
GrafanaSpec is the desired state of grafana
+
+
+
+
+Field
+Description
+
+
+
+
+
+MonitorContainer
+
+
+MonitorContainer
+
+
+
+
+
+(Members of MonitorContainer
are embedded into this type.)
+
+
+
+
+
+logLevel
+
+string
+
+
+
+
+
+
+
+service
+
+
+ServiceSpec
+
+
+
+
+
+
+
+
+username
+
+string
+
+
+
+
+
+
+
+password
+
+string
+
+
+
+
+
+
+
+envs
+
+map[string]string
+
+
+
+(Optional)
+
+
+
+
+HelperSpec
+
+
+(Appears on:
+TidbClusterSpec )
+
+
+
HelperSpec contains details of helper component
+
+
+
+
+Field
+Description
+
+
+
+
+
+image
+
+string
+
+
+
+(Optional)
+Image used to tail slow log and set kernel parameters if necessary, must have tail
and sysctl
installed
+Optional: Defaults to busybox:1.26.2
+
+
+
+
+imagePullPolicy
+
+
+Kubernetes core/v1.PullPolicy
+
+
+
+
+(Optional)
+ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present
+Optional: Defaults to the cluster-level setting
+
+
+
+
+InitializePhase
+(string
alias)
+
+(Appears on:
+TidbInitializerStatus )
+
+
+
+InitializerSpec
+
+
+(Appears on:
+TidbMonitorSpec )
+
+
+
InitializerSpec is the desired state of initializer
+
+
+
+
+Field
+Description
+
+
+
+
+
+MonitorContainer
+
+
+MonitorContainer
+
+
+
+
+
+(Members of MonitorContainer
are embedded into this type.)
+
+
+
+
+
+envs
+
+map[string]string
+
+
+
+(Optional)
+
+
+
+
+Interval
+
+
+(Appears on:
+Quota )
+
+
+
Interval is the configuration of [quotas.default.interval] section.
+
+
+
+
+Field
+Description
+
+
+
+
+
+duration
+
+int32
+
+
+
+(Optional)
+Optional: Defaults to 3600
+
+
+
+
+queries
+
+int32
+
+
+
+(Optional)
+Optional: Defaults to 0
+
+
+
+
+errors
+
+int32
+
+
+
+(Optional)
+Optional: Defaults to 0
+
+
+
+
+result_rows
+
+int32
+
+
+
+(Optional)
+Optional: Defaults to 0
+
+
+
+
+read_rows
+
+int32
+
+
+
+(Optional)
+Optional: Defaults to 0
+
+
+
+
+execution_time
+
+int32
+
+
+
+(Optional)
+Optional: Defaults to 0
+
+
+
+
+IsolationRead
+
+
+(Appears on:
+TiDBConfig )
+
+
+
IsolationRead is the config for isolation read.
+
+
+
+
+Field
+Description
+
+
+
+
+
+engines
+
+[]string
+
+
+
+(Optional)
+Engines filters tidb-server access paths by engine type.
+imported from v3.1.0
+
+
+
+
+Log
+
+
+(Appears on:
+TiDBConfig )
+
+
+
Log is the log section of config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+level
+
+string
+
+
+
+(Optional)
+Log level.
+Optional: Defaults to info
+
+
+
+
+format
+
+string
+
+
+
+(Optional)
+Log format. one of json, text, or console.
+Optional: Defaults to text
+
+
+
+
+disable-timestamp
+
+bool
+
+
+
+(Optional)
+Disable automatic timestamps in output.
+
+
+
+
+enable-timestamp
+
+bool
+
+
+
+(Optional)
+EnableTimestamp enables automatic timestamps in log output.
+
+
+
+
+enable-error-stack
+
+bool
+
+
+
+(Optional)
+EnableErrorStack enables annotating logs with the full stack error
+message.
+
+
+
+
+file
+
+
+FileLogConfig
+
+
+
+
+(Optional)
+File log config.
+
+
+
+
+enable-slow-log
+
+bool
+
+
+
+(Optional)
+
+
+
+
+slow-query-file
+
+string
+
+
+
+(Optional)
+
+
+
+
+slow-threshold
+
+uint64
+
+
+
+(Optional)
+Optional: Defaults to 300
+
+
+
+
+expensive-threshold
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 10000
+
+
+
+
+query-log-max-len
+
+uint64
+
+
+
+(Optional)
+Optional: Defaults to 2048
+
+
+
+
+record-plan-in-slow-log
+
+uint32
+
+
+
+(Optional)
+Optional: Defaults to 1
+
+
+
+
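An illustrative `spec.tidb.config` fragment exercising the log section above; values either repeat the documented defaults or are assumptions:

```yaml
tidb:
  config:
    log:
      level: info                # documented default
      format: text               # documented default
      slow-threshold: 300        # documented default
      query-log-max-len: 2048    # documented default
      file:
        max-days: 7              # illustrative retention
        max-backups: 7
```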
+LogTailerSpec
+
+
+(Appears on:
+TiFlashSpec )
+
+
+
LogTailerSpec represents an optional log tailer sidecar container
+
+
+MasterKeyFileConfig
+
+
+(Appears on:
+TiKVMasterKeyConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+method
+
+string
+
+
+
+Encryption method; the master key is used to encrypt data keys.
+Possible values: plaintext, aes128-ctr, aes192-ctr, aes256-ctr
+Optional: Default to plaintext
+optional
+
+
+
+
+MasterKeyKMSConfig
+
+
+(Appears on:
+TiKVMasterKeyConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+key-id
+
+string
+
+
+
+AWS CMK key-id; it can be found in the AWS Console or via the aws cli.
+This field is required
+
+
+
+
+access-key
+
+string
+
+
+
+AccessKey of AWS user, leave empty if using another authorization method
+optional
+
+
+
+
+secret-access-key
+
+string
+
+
+
+SecretKey of AWS user, leave empty if using another authorization method
+optional
+
+
+
+
+region
+
+string
+
+
+
+Region of this KMS key
+Optional: Default to us-east-1
+optional
+
+
+
+
+endpoint
+
+string
+
+
+
+Used for a KMS-compatible KMS, such as Ceph or MinIO; if using AWS, leave empty
+optional
+
+
+
+
+MemberPhase
+(string
alias)
+
+(Appears on:
+PDStatus ,
+PumpStatus ,
+TiDBStatus ,
+TiKVStatus )
+
+
+
MemberPhase is the current state of member
+
+MemberType
+(string
alias)
+
+
MemberType represents member type
+
+MetricsStatus
+
+
+(Appears on:
+BasicAutoScalerStatus )
+
+
+
MetricsStatus describe the basic metrics status in the last auto-scaling reconciliation
+
+
+
+
+Field
+Description
+
+
+
+
+
+name
+
+string
+
+
+
+Name indicates the metrics name
+
+
+
+
+currentValue
+
+string
+
+
+
+CurrentValue indicates the value calculated in the last auto-scaling reconciliation
+
+
+
+
+thresholdValue
+
+string
+
+
+
+TargetValue indicates the threshold value for this metrics in auto-scaling
+
+
+
+
+MonitorComponentAccessor
+
+
+
+MonitorContainer
+
+
+(Appears on:
+GrafanaSpec ,
+InitializerSpec ,
+PrometheusSpec ,
+ReloaderSpec )
+
+
+
MonitorContainer is the common attributes of the container of monitoring
+
+
+Networks
+
+
+(Appears on:
+User )
+
+
+
Networks is the configuration of [users.readonly.networks] section.
+
+
+
+
+Field
+Description
+
+
+
+
+
+ip
+
+string
+
+
+
+(Optional)
+
+
+
+
+OpenTracing
+
+
+(Appears on:
+TiDBConfig )
+
+
+
OpenTracing is the opentracing section of the config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enable
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to false
+
+
+
+
+sampler
+
+
+OpenTracingSampler
+
+
+
+
+(Optional)
+
+
+
+
+reporter
+
+
+OpenTracingReporter
+
+
+
+
+(Optional)
+
+
+
+
+rpc-metrics
+
+bool
+
+
+
+(Optional)
+
+
+
+
+OpenTracingReporter
+
+
+(Appears on:
+OpenTracing )
+
+
+
OpenTracingReporter is the config for opentracing reporter.
+See https://godoc.org/github.com/uber/jaeger-client-go/config#ReporterConfig
+
+
+
+
+Field
+Description
+
+
+
+
+
+queue-size
+
+int
+
+
+
+(Optional)
+
+
+
+
+buffer-flush-interval
+
+time.Duration
+
+
+
+(Optional)
+
+
+
+
+log-spans
+
+bool
+
+
+
+(Optional)
+
+
+
+
+local-agent-host-port
+
+string
+
+
+
+(Optional)
+
+
+
+
+OpenTracingSampler
+
+
+(Appears on:
+OpenTracing )
+
+
+
OpenTracingSampler is the config for opentracing sampler.
+See https://godoc.org/github.com/uber/jaeger-client-go/config#SamplerConfig
+
+
+
+
+Field
+Description
+
+
+
+
+
+type
+
+string
+
+
+
+(Optional)
+
+
+
+
+param
+
+float64
+
+
+
+(Optional)
+
+
+
+
+sampling-server-url
+
+string
+
+
+
+(Optional)
+
+
+
+
+max-operations
+
+int
+
+
+
+(Optional)
+
+
+
+
+sampling-refresh-interval
+
+time.Duration
+
+
+
+(Optional)
+
+
+
+
+PDConfig
+
+
+(Appears on:
+PDSpec )
+
+
+
PDConfig is the configuration of pd-server
+
+
+
+
+Field
+Description
+
+
+
+
+
+force-new-cluster
+
+bool
+
+
+
+(Optional)
+
+
+
+
+enable-grpc-gateway
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+lease
+
+int64
+
+
+
+(Optional)
+LeaderLease time, if leader doesn’t update its TTL
+in etcd after lease time, etcd will expire the leader key
+and other servers can campaign the leader again.
+Etcd only supports seconds TTL, so here is second too.
+Optional: Defaults to 3
+
+
+
+
+log
+
+
+PDLogConfig
+
+
+
+
+(Optional)
+Log related config.
+
+
+
+
+log-file
+
+string
+
+
+
+(Optional)
+Backward compatibility.
+
+
+
+
+log-level
+
+string
+
+
+
+(Optional)
+
+
+
+
+tso-save-interval
+
+string
+
+
+
+(Optional)
+TsoSaveInterval is the interval to save timestamp.
+Optional: Defaults to 3s
+
+
+
+
+metric
+
+
+PDMetricConfig
+
+
+
+
+(Optional)
+
+
+
+
+schedule
+
+
+PDScheduleConfig
+
+
+
+
+(Optional)
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+replication
+
+
+PDReplicationConfig
+
+
+
+
+(Optional)
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+namespace
+
+
+map[string]github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDNamespaceConfig
+
+
+
+
+(Optional)
+
+
+
+
+pd-server
+
+
+PDServerConfig
+
+
+
+
+(Optional)
+
+
+
+
+cluster-version
+
+string
+
+
+
+(Optional)
+
+
+
+
+quota-backend-bytes
+
+string
+
+
+
+(Optional)
+QuotaBackendBytes Raise alarms when backend size exceeds the given quota. 0 means use the default quota.
+the default size is 2GB, the maximum is 8GB.
+
+
+
+
+auto-compaction-mode
+
+string
+
+
+
+(Optional)
+AutoCompactionMode is either ‘periodic’ or ‘revision’. The default value is ‘periodic’.
+
+
+
+
+auto-compaction-retention-v2
+
+string
+
+
+
+(Optional)
+AutoCompactionRetention is either duration string with time unit
+(e.g. ‘5m’ for 5-minute), or revision unit (e.g. ‘5000’).
+If no time unit is provided and compaction mode is ‘periodic’,
+the unit defaults to hour. For example, ‘5’ translates into 5-hour.
+The default retention is 1 hour.
+Before etcd v3.3.x, the type of retention is int. We add ‘v2’ suffix to make it backward compatible.
+
+
+
+
+tikv-interval
+
+string
+
+
+
+(Optional)
+TickInterval is the interval for etcd Raft tick.
+
+
+
+
+election-interval
+
+string
+
+
+
+(Optional)
+ElectionInterval is the interval for etcd Raft election.
+
+
+
+
+enable-prevote
+
+bool
+
+
+
+(Optional)
+Prevote is true to enable Raft Pre-Vote.
+If enabled, Raft runs an additional election phase
+to check whether it would get enough votes to win
+an election, thus minimizing disruptions.
+Optional: Defaults to true
+
+
+
+
+security
+
+
+PDSecurityConfig
+
+
+
+
+(Optional)
+
+
+
+
+label-property
+
+
+PDLabelPropertyConfig
+
+
+
+
+(Optional)
+
+
+
+
+namespace-classifier
+
+string
+
+
+
+(Optional)
+NamespaceClassifier is for classifying stores/regions into different
+namespaces.
+Optional: Defaults to true
+
+
+
+
+dashboard
+
+
+DashboardConfig
+
+
+
+
+(Optional)
+
+
+
+
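+To illustrate how a few of these pd-server options combine, here is a minimal, hedged sketch of a config block under spec.pd in a TidbCluster manifest; the placement and values are assumptions for illustration.
+
+```yaml
+# Hypothetical fragment: spec.pd.config
+config:
+  lease: 3                   # leader lease TTL in seconds; defaults to 3
+  tso-save-interval: "3s"    # defaults to 3s
+  enable-prevote: true       # defaults to true
+  log:
+    level: "info"            # defaults to info
+```
+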
+PDFailureMember
+
+
+(Appears on:
+PDStatus )
+
+
+
PDFailureMember is the pd failure member information
+
+
+
+
+Field
+Description
+
+
+
+
+
+podName
+
+string
+
+
+
+
+
+
+
+memberID
+
+string
+
+
+
+
+
+
+
+pvcUID
+
+k8s.io/apimachinery/pkg/types.UID
+
+
+
+
+
+
+
+memberDeleted
+
+bool
+
+
+
+
+
+
+
+createdAt
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+
+
+
+
+PDLabelPropertyConfig
+(map[string]github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDStoreLabels
alias)
+
+(Appears on:
+PDConfig )
+
+
+
+PDLogConfig
+
+
+(Appears on:
+PDConfig )
+
+
+
PDLogConfig serializes log related config in toml/json.
+
+
+
+
+Field
+Description
+
+
+
+
+
+level
+
+string
+
+
+
+(Optional)
+Log level.
+Optional: Defaults to info
+
+
+
+
+format
+
+string
+
+
+
+(Optional)
+Log format. one of json, text, or console.
+
+
+
+
+disable-timestamp
+
+bool
+
+
+
+(Optional)
+Disable automatic timestamps in output.
+
+
+
+
+file
+
+
+FileLogConfig
+
+
+
+
+(Optional)
+File log config.
+
+
+
+
+development
+
+bool
+
+
+
+(Optional)
+Development puts the logger in development mode, which changes the
+behavior of DPanicLevel and takes stacktraces more liberally.
+
+
+
+
+disable-caller
+
+bool
+
+
+
+(Optional)
+DisableCaller stops annotating logs with the calling function’s file
+name and line number. By default, all logs are annotated.
+
+
+
+
+disable-stacktrace
+
+bool
+
+
+
+(Optional)
+DisableStacktrace completely disables automatic stacktrace capturing. By
+default, stacktraces are captured for WarnLevel and above logs in
+development and ErrorLevel and above in production.
+
+
+
+
+disable-error-verbose
+
+bool
+
+
+
+(Optional)
+DisableErrorVerbose stops annotating logs with the full verbose error
+message.
+
+
+
+
+PDMember
+
+
+(Appears on:
+PDStatus )
+
+
+
PDMember is PD member
+
+
+
+
+Field
+Description
+
+
+
+
+
+name
+
+string
+
+
+
+
+
+
+
+id
+
+string
+
+
+
+member id is actually a uint64, but apimachinery’s json only treats numbers as int64/float64
+so uint64 may overflow int64 and thus convert to float64
+
+
+
+
+clientURL
+
+string
+
+
+
+
+
+
+
+health
+
+bool
+
+
+
+
+
+
+
+lastTransitionTime
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+Last time the health transitioned from one to another.
+
+
+
+
+PDMetricConfig
+
+
+(Appears on:
+PDConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+job
+
+string
+
+
+
+(Optional)
+
+
+
+
+address
+
+string
+
+
+
+(Optional)
+
+
+
+
+interval
+
+string
+
+
+
+(Optional)
+
+
+
+
+PDNamespaceConfig
+
+
+(Appears on:
+PDConfig )
+
+
+
PDNamespaceConfig is to overwrite the global setting for specific namespace
+
+
+
+
+Field
+Description
+
+
+
+
+
+leader-schedule-limit
+
+uint64
+
+
+
+(Optional)
+LeaderScheduleLimit is the max coexist leader schedules.
+
+
+
+
+region-schedule-limit
+
+uint64
+
+
+
+(Optional)
+RegionScheduleLimit is the max coexist region schedules.
+
+
+
+
+replica-schedule-limit
+
+uint64
+
+
+
+(Optional)
+ReplicaScheduleLimit is the max coexist replica schedules.
+
+
+
+
+merge-schedule-limit
+
+uint64
+
+
+
+(Optional)
+MergeScheduleLimit is the max coexist merge schedules.
+
+
+
+
+hot-region-schedule-limit
+
+uint64
+
+
+
+(Optional)
+HotRegionScheduleLimit is the max coexist hot region schedules.
+
+
+
+
+max-replicas
+
+uint64
+
+
+
+(Optional)
+MaxReplicas is the number of replicas for each region.
+
+
+
+
+PDReplicationConfig
+
+
+(Appears on:
+PDConfig )
+
+
+
PDReplicationConfig is the replication configuration.
+
+
+
+
+Field
+Description
+
+
+
+
+
+max-replicas
+
+uint64
+
+
+
+(Optional)
+MaxReplicas is the number of replicas for each region.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 3
+
+
+
+
+location-labels
+
+[]string
+
+
+
+(Optional)
+The label keys specified the location of a store.
+The placement priorities is implied by the order of label keys.
+For example, [“zone”, “rack”] means that we should place replicas to
+different zones first, then to different racks if we don’t have enough zones.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+strictly-match-label,string
+
+bool
+
+
+
+(Optional)
+StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocationLabels.
+Immutable, change should be made through pd-ctl after cluster creation.
+Imported from v3.1.0
+
+
+
+
+enable-placement-rules,string
+
+bool
+
+
+
+(Optional)
+When the PlacementRules feature is enabled, MaxReplicas and LocationLabels are no longer used.
+
+
+
+
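+A minimal sketch of the replication block, assuming it is set under spec.pd.config; the label values are illustrative.
+
+```yaml
+# Hypothetical fragment: spec.pd.config.replication
+replication:
+  max-replicas: 3                    # defaults to 3; immutable after cluster creation
+  location-labels: ["zone", "rack"]  # spread replicas across zones first, then racks
+```
+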
+PDScheduleConfig
+
+
+(Appears on:
+PDConfig )
+
+
+
ScheduleConfig is the schedule configuration.
+
+
+
+
+Field
+Description
+
+
+
+
+
+max-snapshot-count
+
+uint64
+
+
+
+(Optional)
+If the snapshot count of one store is greater than this value,
+it will never be used as a source or target store.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 3
+
+
+
+
+max-pending-peer-count
+
+uint64
+
+
+
+(Optional)
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 16
+
+
+
+
+max-merge-region-size
+
+uint64
+
+
+
+(Optional)
+If both the size of region is smaller than MaxMergeRegionSize
+and the number of rows in region is smaller than MaxMergeRegionKeys,
+it will try to merge with adjacent regions.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 20
+
+
+
+
+max-merge-region-keys
+
+uint64
+
+
+
+(Optional)
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 200000
+
+
+
+
+split-merge-interval
+
+string
+
+
+
+(Optional)
+SplitMergeInterval is the minimum interval time to permit merge after split.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 1h
+
+
+
+
+patrol-region-interval
+
+string
+
+
+
+(Optional)
+PatrolRegionInterval is the interval for scanning region during patrol.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+max-store-down-time
+
+string
+
+
+
+(Optional)
+MaxStoreDownTime is the max duration after which
+a store will be considered to be down if it hasn’t reported heartbeats.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 30m
+
+
+
+
+leader-schedule-limit
+
+uint64
+
+
+
+(Optional)
+LeaderScheduleLimit is the max coexist leader schedules.
+Immutable, change should be made through pd-ctl after cluster creation.
+Optional: Defaults to 4.
+Imported from v3.1.0
+
+
+
+
+region-schedule-limit
+
+uint64
+
+
+
+(Optional)
+RegionScheduleLimit is the max coexist region schedules.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 2048
+
+
+
+
+replica-schedule-limit
+
+uint64
+
+
+
+(Optional)
+ReplicaScheduleLimit is the max coexist replica schedules.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 64
+
+
+
+
+merge-schedule-limit
+
+uint64
+
+
+
+(Optional)
+MergeScheduleLimit is the max coexist merge schedules.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 8
+
+
+
+
+hot-region-schedule-limit
+
+uint64
+
+
+
+(Optional)
+HotRegionScheduleLimit is the max coexist hot region schedules.
+Immutable, change should be made through pd-ctl after cluster creation
+Optional: Defaults to 4
+
+
+
+
+hot-region-cache-hits-threshold
+
+uint64
+
+
+
+(Optional)
+HotRegionCacheHitThreshold is the cache hits threshold of the hot region.
+If the number of times a region hits the hot cache is greater than this
+threshold, it is considered a hot region.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+tolerant-size-ratio
+
+float64
+
+
+
+(Optional)
+TolerantSizeRatio is the ratio of buffer size for balance scheduler.
+Immutable, change should be made through pd-ctl after cluster creation.
+Imported from v3.1.0
+
+
+
+
+low-space-ratio
+
+float64
+
+
+
+(Optional)
+LowSpaceRatio is the lowest usage ratio of a store which is regarded as low space.
+A store moves from the high space stage (usage below HighSpaceRatio * capacity), through a transition stage, to the low space stage (usage above LowSpaceRatio * capacity).
+When in low space, the store region score increases to a very large value and varies inversely with the available size.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+high-space-ratio
+
+float64
+
+
+
+(Optional)
+HighSpaceRatio is the highest usage ratio of a store which is regarded as high space.
+High space means there is a lot of spare capacity, and store region score varies directly with used size.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+disable-raft-learner,string
+
+bool
+
+
+
+(Optional)
+DisableLearner is the option to disable using AddLearnerNode instead of AddNode
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+disable-remove-down-replica,string
+
+bool
+
+
+
+(Optional)
+DisableRemoveDownReplica is the option to prevent replica checker from
+removing down replicas.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+disable-replace-offline-replica,string
+
+bool
+
+
+
+(Optional)
+DisableReplaceOfflineReplica is the option to prevent replica checker from
+replacing offline replicas.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+disable-make-up-replica,string
+
+bool
+
+
+
+(Optional)
+DisableMakeUpReplica is the option to prevent replica checker from making up
+replicas when replica count is less than expected.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+disable-remove-extra-replica,string
+
+bool
+
+
+
+(Optional)
+DisableRemoveExtraReplica is the option to prevent replica checker from
+removing extra replicas.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+disable-location-replacement,string
+
+bool
+
+
+
+(Optional)
+DisableLocationReplacement is the option to prevent replica checker from
+moving replica to a better location.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+disable-namespace-relocation,string
+
+bool
+
+
+
+(Optional)
+DisableNamespaceRelocation is the option to prevent namespace checker
+from moving replica to the target namespace.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+schedulers-v2
+
+
+PDSchedulerConfigs
+
+
+
+
+(Optional)
+Schedulers support loading customized schedulers.
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+schedulers-payload
+
+map[string]string
+
+
+
+(Optional)
+Only used to display
+
+
+
+
+enable-one-way-merge,string
+
+bool
+
+
+
+(Optional)
+EnableOneWayMerge is the option to enable one way merge. This means a Region can only be merged into the next region of it.
+Imported from v3.1.0
+
+
+
+
+enable-cross-table-merge,string
+
+bool
+
+
+
+(Optional)
+EnableCrossTableMerge is the option to enable cross table merge. This means two Regions can be merged with different table IDs.
+This option only works when key type is “table”.
+Imported from v3.1.0
+
+
+
+
+PDSchedulerConfig
+
+
+
PDSchedulerConfig is customized scheduler configuration
+
+
+
+
+Field
+Description
+
+
+
+
+
+type
+
+string
+
+
+
+(Optional)
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+args
+
+[]string
+
+
+
+(Optional)
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+disable
+
+bool
+
+
+
+(Optional)
+Immutable, change should be made through pd-ctl after cluster creation
+
+
+
+
+PDSchedulerConfigs
+([]github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSchedulerConfig
alias)
+
+(Appears on:
+PDScheduleConfig )
+
+
+
+PDSecurityConfig
+
+
+(Appears on:
+PDConfig )
+
+
+
PDSecurityConfig is the configuration for supporting tls.
+
+
+
+
+Field
+Description
+
+
+
+
+
+cacert-path
+
+string
+
+
+
+(Optional)
+CAPath is the path of the file that contains the list of trusted SSL CAs. If set, the following settings should not be empty
+
+
+
+
+cert-path
+
+string
+
+
+
+(Optional)
+CertPath is the path of file that contains X509 certificate in PEM format.
+
+
+
+
+key-path
+
+string
+
+
+
+(Optional)
+KeyPath is the path of file that contains X509 key in PEM format.
+
+
+
+
+cert-allowed-cn
+
+[]string
+
+
+
+(Optional)
+CertAllowedCN is the Common Name that is allowed
+
+
+
+
+PDServerConfig
+
+
+(Appears on:
+PDConfig )
+
+
+
PDServerConfig is the configuration for pd server.
+
+
+
+
+Field
+Description
+
+
+
+
+
+use-region-storage,string
+
+bool
+
+
+
+(Optional)
+UseRegionStorage enables the independent region storage.
+
+
+
+
+metric-storage
+
+string
+
+
+
+(Optional)
+MetricStorage is the cluster metric storage.
+Currently we use prometheus as metric storage, we may use PD/TiKV as metric storage later.
+Imported from v3.1.0
+
+
+
+
+PDSpec
+
+
+(Appears on:
+TidbClusterSpec )
+
+
+
PDSpec contains details of PD members
+
+
+
+
+Field
+Description
+
+
+
+
+
+ComponentSpec
+
+
+ComponentSpec
+
+
+
+
+
+(Members of ComponentSpec
are embedded into this type.)
+
+
+
+
+
+ResourceRequirements
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+
+
+
+(Members of ResourceRequirements
are embedded into this type.)
+
+
+
+
+
+replicas
+
+int32
+
+
+
+The desired ready replicas
+
+
+
+
+baseImage
+
+string
+
+
+
+(Optional)
+TODO: remove optional after defaulting introduced
+Base image of the component, image tag is now allowed during validation
+
+
+
+
+service
+
+
+ServiceSpec
+
+
+
+
+(Optional)
+Service defines a Kubernetes service of PD cluster.
+Optional: Defaults to .spec.services
in favor of backward compatibility
+
+
+
+
+maxFailoverCount
+
+int32
+
+
+
+(Optional)
+MaxFailoverCount limit the max replicas could be added in failover, 0 means no failover.
+Optional: Defaults to 3
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for PD data storage.
+Defaults to Kubernetes default storage class.
+
+
+
+
+config
+
+
+PDConfig
+
+
+
+
+(Optional)
+Config is the Configuration of pd-servers
+
+
+
+
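+Putting a few of the PDSpec fields above together, here is a minimal, hedged TidbCluster sketch; the cluster name, storage class, and storage size are assumptions.
+
+```yaml
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+  name: basic                        # assumed name
+spec:
+  pd:
+    baseImage: pingcap/pd            # base image of the component
+    replicas: 3                      # desired ready replicas
+    maxFailoverCount: 3              # defaults to 3; 0 disables failover
+    storageClassName: local-storage  # assumed storage class
+    requests:
+      storage: 10Gi                  # via the embedded ResourceRequirements
+    config: {}
+```
+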
+PDStatus
+
+
+(Appears on:
+TidbClusterStatus )
+
+
+
PDStatus is PD status
+
+
+PDStoreLabel
+
+
+
PDStoreLabel is the config item of LabelPropertyConfig.
+
+
+
+
+Field
+Description
+
+
+
+
+
+key
+
+string
+
+
+
+(Optional)
+
+
+
+
+value
+
+string
+
+
+
+(Optional)
+
+
+
+
+PDStoreLabels
+([]github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDStoreLabel
alias)
+
+
+
+Performance
+
+(Appears on:
+TiDBConfig )
+
+
+
Performance is the performance section of the config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+max-procs
+
+uint
+
+
+
+(Optional)
+
+
+
+
+max-memory
+
+uint64
+
+
+
+(Optional)
+Optional: Defaults to 0
+
+
+
+
+stats-lease
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 3s
+
+
+
+
+stmt-count-limit
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 5000
+
+
+
+
+feedback-probability
+
+float64
+
+
+
+(Optional)
+Optional: Defaults to 0.05
+
+
+
+
+query-feedback-limit
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 1024
+
+
+
+
+pseudo-estimate-ratio
+
+float64
+
+
+
+(Optional)
+Optional: Defaults to 0.8
+
+
+
+
+force-priority
+
+string
+
+
+
+(Optional)
+Optional: Defaults to NO_PRIORITY
+
+
+
+
+bind-info-lease
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 3s
+
+
+
+
+txn-total-size-limit
+
+uint64
+
+
+
+(Optional)
+Optional: Defaults to 104857600
+
+
+
+
+tcp-keep-alive
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+cross-join
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+run-auto-analyze
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+txn-entry-count-limit
+
+uint64
+
+
+
+(Optional)
+Optional: Defaults to 300000
+
+
+
+
+PessimisticTxn
+
+
+(Appears on:
+TiDBConfig )
+
+
+
PessimisticTxn is the config for pessimistic transaction.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enable
+
+bool
+
+
+
+(Optional)
+Enable must be true for ‘begin lock’ or session variable to start a pessimistic transaction.
+Optional: Defaults to true
+
+
+
+
+max-retry-count
+
+uint
+
+
+
+(Optional)
+The max count of retry for a single statement in a pessimistic transaction.
+Optional: Defaults to 256
+
+
+
+
+PlanCache
+
+
+
PlanCache is the PlanCache section of the config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enabled
+
+bool
+
+
+
+(Optional)
+
+
+
+
+capacity
+
+uint
+
+
+
+(Optional)
+
+
+
+
+shards
+
+uint
+
+
+
+(Optional)
+
+
+
+
+Plugin
+
+
+(Appears on:
+TiDBConfig )
+
+
+
Plugin is the config for plugin
+
+
+
+
+Field
+Description
+
+
+
+
+
+dir
+
+string
+
+
+
+(Optional)
+
+
+
+
+load
+
+string
+
+
+
+(Optional)
+
+
+
+
+PreparedPlanCache
+
+
+(Appears on:
+TiDBConfig )
+
+
+
PreparedPlanCache is the PreparedPlanCache section of the config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enabled
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to false
+
+
+
+
+capacity
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 100
+
+
+
+
+memory-guard-ratio
+
+float64
+
+
+
+(Optional)
+Optional: Defaults to 0.1
+
+
+
+
+Profile
+
+
+
Profile is the configuration profiles.
+
+
+
+
+Field
+Description
+
+
+
+
+
+readonly
+
+int32
+
+
+
+(Optional)
+
+
+
+
+max_memory_usage
+
+int64
+
+
+
+(Optional)
+
+
+
+
+use_uncompressed_cache
+
+int32
+
+
+
+(Optional)
+
+
+
+
+load_balancing
+
+string
+
+
+
+(Optional)
+
+
+
+
+PrometheusSpec
+
+
+(Appears on:
+TidbMonitorSpec )
+
+
+
PrometheusSpec is the desired state of prometheus
+
+
+
+
+Field
+Description
+
+
+
+
+
+MonitorContainer
+
+
+MonitorContainer
+
+
+
+
+
+(Members of MonitorContainer
are embedded into this type.)
+
+
+
+
+
+logLevel
+
+string
+
+
+
+
+
+
+
+service
+
+
+ServiceSpec
+
+
+
+
+
+
+
+
+reserveDays
+
+int
+
+
+
+(Optional)
+
+
+
+
+ProxyProtocol
+
+
+(Appears on:
+TiDBConfig )
+
+
+
ProxyProtocol is the PROXY protocol section of the config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+networks
+
+string
+
+
+
+(Optional)
+PROXY protocol acceptable client networks.
+An empty value means the PROXY protocol is disabled;
+* means all networks.
+
+
+
+
+header-timeout
+
+uint
+
+
+
+(Optional)
+PROXY protocol header read timeout, Unit is second.
+
+
+
+
+PumpSpec
+
+
+(Appears on:
+TidbClusterSpec )
+
+
+
PumpSpec contains details of Pump members
+
+
+
+
+Field
+Description
+
+
+
+
+
+ComponentSpec
+
+
+ComponentSpec
+
+
+
+
+
+(Members of ComponentSpec
are embedded into this type.)
+
+
+
+
+
+ResourceRequirements
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+
+
+
+(Members of ResourceRequirements
are embedded into this type.)
+
+
+
+
+
+replicas
+
+int32
+
+
+
+The desired ready replicas
+
+
+
+
+baseImage
+
+string
+
+
+
+(Optional)
+TODO: remove optional after defaulting introduced
+Base image of the component, image tag is now allowed during validation
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for Pump data storage.
+Defaults to Kubernetes default storage class.
+
+
+
+
+GenericConfig
+
+github.com/pingcap/tidb-operator/pkg/util/config.GenericConfig
+
+
+
+
+(Members of GenericConfig
are embedded into this type.)
+
+(Optional)
+TODO: add schema
+The configuration of Pump cluster.
+
+
+
+
+setTimeZone
+
+bool
+
+
+
+For backward compatibility with helm chart
+
+
+
+
+PumpStatus
+
+
+(Appears on:
+TidbClusterStatus )
+
+
+
PumpStatus is Pump status
+
+
+Quota
+
+
+
Quota is the configuration of [quotas.default] section.
+
+
+
+
+Field
+Description
+
+
+
+
+
+interval
+
+
+Interval
+
+
+
+
+(Optional)
+
+
+
+
+ReloaderSpec
+
+
+(Appears on:
+TidbMonitorSpec )
+
+
+
ReloaderSpec is the desired state of reloader
+
+
+
+
+Field
+Description
+
+
+
+
+
+MonitorContainer
+
+
+MonitorContainer
+
+
+
+
+
+(Members of MonitorContainer
are embedded into this type.)
+
+
+
+
+
+service
+
+
+ServiceSpec
+
+
+
+
+
+
+
+
+RestoreCondition
+
+
+(Appears on:
+RestoreStatus )
+
+
+
RestoreCondition describes the observed state of a Restore at a certain point.
+
+
+RestoreConditionType
+(string
alias)
+
+(Appears on:
+RestoreCondition )
+
+
+
RestoreConditionType represents a valid condition of a Restore.
+
+RestoreSpec
+
+
+(Appears on:
+Restore )
+
+
+
RestoreSpec contains the specification for a restore of a tidb cluster backup.
+
+
+
+
+Field
+Description
+
+
+
+
+
+to
+
+
+TiDBAccessConfig
+
+
+
+
+To is the tidb cluster that needs to restore.
+
+
+
+
+backupType
+
+
+BackupType
+
+
+
+
+Type is the backup type for tidb cluster.
+
+
+
+
+tikvGCLifeTime
+
+string
+
+
+
+TikvGCLifeTime is to specify the safe gc life time for restore.
+The time limit during which data is retained for each GC, in the format of Go Duration.
+When a GC happens, the current time minus this value is the safe point.
+
+
+
+
+StorageProvider
+
+
+StorageProvider
+
+
+
+
+
+(Members of StorageProvider
are embedded into this type.)
+
+StorageProvider configures where and how backups should be stored.
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for Restore data storage.
+Defaults to Kubernetes default storage class.
+
+
+
+
+storageSize
+
+string
+
+
+
+StorageSize is the requested storage size for the backup job
+
+
+
+
+br
+
+
+BRConfig
+
+
+
+
+BR is the configs for BR.
+
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+
+
+(Optional)
+Base tolerations of restore Pods, components may add more tolerations upon this respectively
+
+
+
+
+affinity
+
+
+Kubernetes core/v1.Affinity
+
+
+
+
+(Optional)
+Affinity of restore Pods
+
+
+
+
+useKMS
+
+bool
+
+
+
+Use KMS to decrypt the secrets
+
+
+
+
+serviceAccount
+
+string
+
+
+
+Specify service account of restore
+
+
+
+
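+As a rough illustration of the RestoreSpec fields above, here is a minimal, hedged Restore sketch; the object name, host, secret name, and sizes are assumptions, and the storage provider block (embedded from StorageProvider) is omitted here.
+
+```yaml
+apiVersion: pingcap.com/v1alpha1
+kind: Restore
+metadata:
+  name: demo-restore                       # assumed name
+spec:
+  to:                                      # TiDBAccessConfig: the cluster to restore into
+    host: basic-tidb                       # assumed tidb service host
+    port: 4000
+    user: root
+    secretName: restore-demo-tidb-secret   # secret storing the tidb password
+  storageClassName: local-storage          # assumed
+  storageSize: 10Gi                        # requested storage size for the job
+```
+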
+RestoreStatus
+
+
+(Appears on:
+Restore )
+
+
+
RestoreStatus represents the current status of a tidb cluster restore.
+
+
+S3StorageProvider
+
+
+(Appears on:
+StorageProvider )
+
+
+
S3StorageProvider represents a S3 compliant storage for storing backups.
+
+
+
+
+Field
+Description
+
+
+
+
+
+provider
+
+
+S3StorageProviderType
+
+
+
+
+Provider represents the specific storage provider that implements the S3 interface
+
+
+
+
+region
+
+string
+
+
+
+Region in which the S3 compatible bucket is located.
+
+
+
+
+bucket
+
+string
+
+
+
+Bucket in which to store the backup data.
+
+
+
+
+endpoint
+
+string
+
+
+
+Endpoint of S3 compatible storage service
+
+
+
+
+storageClass
+
+string
+
+
+
+StorageClass represents the storage class
+
+
+
+
+acl
+
+string
+
+
+
+Acl represents access control permissions for this bucket
+
+
+
+
+secretName
+
+string
+
+
+
+SecretName is the name of secret which stores
+S3 compliant storage access key and secret key.
+
+
+
+
+prefix
+
+string
+
+
+
+Prefix for the keys.
+
+
+
+
+sse
+
+string
+
+
+
+SSE Server-Side Encryption.
+
+
+
+
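+The fragment below sketches how the S3 fields above might be filled in; the s3 key under which this block is embedded in a Backup or Restore spec, as well as all values, are assumptions for illustration.
+
+```yaml
+# Hypothetical S3-compliant storage provider block (the s3 key name is assumed).
+s3:
+  provider: aws                  # provider implementing the S3 interface
+  region: us-west-2              # assumed region
+  bucket: my-backup-bucket       # assumed bucket
+  prefix: my-cluster/backups     # assumed key prefix
+  secretName: s3-secret          # secret storing access key and secret key
+```
+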
+S3StorageProviderType
+(string
alias)
+
+(Appears on:
+S3StorageProvider )
+
+
+
S3StorageProviderType represents the specific storage provider that implements the S3 interface
+
+Security
+
+
+(Appears on:
+TiDBConfig )
+
+
+
Security is the security section of the config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+skip-grant-table
+
+bool
+
+
+
+(Optional)
+
+
+
+
+ssl-ca
+
+string
+
+
+
+(Optional)
+
+
+
+
+ssl-cert
+
+string
+
+
+
+(Optional)
+
+
+
+
+ssl-key
+
+string
+
+
+
+(Optional)
+
+
+
+
+cluster-ssl-ca
+
+string
+
+
+
+(Optional)
+
+
+
+
+cluster-ssl-cert
+
+string
+
+
+
+(Optional)
+
+
+
+
+cluster-ssl-key
+
+string
+
+
+
+(Optional)
+
+
+
+
+cluster-verify-cn
+
+[]string
+
+
+
+(Optional)
+ClusterVerifyCN is the Common Name that is allowed
+
+
+
+
+Service
+
+
+(Appears on:
+TidbClusterSpec )
+
+
+
Deprecated
+Service represent service type used in TidbCluster
+
+
+
+
+Field
+Description
+
+
+
+
+
+name
+
+string
+
+
+
+
+
+
+
+type
+
+string
+
+
+
+
+
+
+
+ServiceSpec
+
+
+(Appears on:
+GrafanaSpec ,
+PDSpec ,
+PrometheusSpec ,
+ReloaderSpec ,
+TiDBServiceSpec )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+type
+
+
+Kubernetes core/v1.ServiceType
+
+
+
+
+Type of the real kubernetes service
+
+
+
+
+annotations
+
+map[string]string
+
+
+
+(Optional)
+Additional annotations of the kubernetes service object
+
+
+
+
+loadBalancerIP
+
+string
+
+
+
+(Optional)
+LoadBalancerIP is the loadBalancerIP of service
+Optional: Defaults to omitted
+
+
+
+
+clusterIP
+
+string
+
+
+
+(Optional)
+ClusterIP is the clusterIP of service
+
+
+
+
+portName
+
+string
+
+
+
+(Optional)
+PortName is the name of service port
+
+
+
+
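+A minimal sketch of a service block using the fields above, assuming it is set under a component such as spec.pd.service; the annotation and port name are illustrative.
+
+```yaml
+# Hypothetical fragment: spec.pd.service
+service:
+  type: ClusterIP                        # real Kubernetes service type
+  annotations:
+    example.com/owner: "database-team"   # assumed annotation
+  portName: client                       # assumed port name
+```
+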
+Status
+
+
+(Appears on:
+TiDBConfig )
+
+
+
Status is the status section of the config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+metrics-addr
+
+string
+
+
+
+(Optional)
+
+
+
+
+metrics-interval
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 15
+
+
+
+
+report-status
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+record-db-qps
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to false
+
+
+
+
+StmtSummary
+
+
+(Appears on:
+TiDBConfig )
+
+
+
StmtSummary is the config for statement summary.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enable
+
+bool
+
+
+
+(Optional)
+Enable statement summary or not.
+
+
+
+
+max-stmt-count
+
+uint
+
+
+
+(Optional)
+The maximum number of statements kept in memory.
+Optional: Defaults to 100
+
+
+
+
+max-sql-length
+
+uint
+
+
+
+(Optional)
+The maximum length of displayed normalized SQL and sample SQL.
+Optional: Defaults to 4096
+
+
+
+
+refresh-interval
+
+int
+
+
+
+(Optional)
+The refresh interval of statement summary.
+
+
+
+
+history-size
+
+int
+
+
+
+(Optional)
+The maximum history size of statement summary.
+
+
+
+
+StorageClaim
+
+
+(Appears on:
+TiFlashSpec )
+
+
+
StorageClaim contains details of TiFlash storages
+
+
+StorageProvider
+
+
+(Appears on:
+BackupSpec ,
+RestoreSpec )
+
+
+
StorageProvider defines the configuration for storing a backup in backend storage.
+
+
+TLSCluster
+
+
+(Appears on:
+TidbClusterSpec )
+
+
+
TLSCluster can enable TLS connection between TiDB server components
+https://pingcap.com/docs/stable/how-to/secure/enable-tls-between-components/
+
+
+
+
+Field
+Description
+
+
+
+
+
+enabled
+
+bool
+
+
+
+(Optional)
+Enable mutual TLS authentication among TiDB components
+Once enabled, the mutual authentication applies to all components,
+and it does not support applying to only part of the components.
+The steps to enable this feature:
+1. Generate TiDB server components certificates and a client-side certificate for them.
+There are multiple ways to generate these certificates:
+- user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/generate-self-signed-certificates/
+- use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+- or use cert-manager signed certificates: https://cert-manager.io/
+2. Create one secret object for one component which contains the certificates created above.
+The name of this Secret must be: <clusterName>-<componentName>-cluster-secret.
+For PD: kubectl create secret generic <clusterName>-pd-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+For TiKV: kubectl create secret generic <clusterName>-tikv-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+For TiDB: kubectl create secret generic <clusterName>-tidb-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+For Client: kubectl create secret generic <clusterName>-cluster-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+Same for other components.
+
+
+
+
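+Once the per-component Secrets described above exist, the feature is switched on in the TidbCluster spec. Below is a minimal, hedged sketch; the tlsCluster field name is assumed from the fact that this type appears on TidbClusterSpec.
+
+```yaml
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+  name: basic                 # assumed name
+spec:
+  tlsCluster:                 # field name assumed
+    enabled: true             # enable mutual TLS among all components
+```
+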
+TiDBAccessConfig
+
+
+(Appears on:
+BackupSpec ,
+RestoreSpec )
+
+
+
TiDBAccessConfig defines the configuration for accessing the tidb cluster
+
+
+
+
+Field
+Description
+
+
+
+
+
+host
+
+string
+
+
+
+Host is the tidb cluster access address
+
+
+
+
+port
+
+int32
+
+
+
+Port is the port number to use for connecting tidb cluster
+
+
+
+
+user
+
+string
+
+
+
+User is the user used to log in to the tidb cluster
+
+
+
+
+secretName
+
+string
+
+
+
+SecretName is the name of secret which stores tidb cluster’s password.
+
+
+
+
+tlsClient
+
+
+TiDBTLSClient
+
+
+
+
+(Optional)
+Whether enable the TLS connection between the SQL client and TiDB server
+Optional: Defaults to nil
+
+
+
+
+TiDBConfig
+
+
+(Appears on:
+TiDBSpec )
+
+
+
TiDBConfig is the configuration of tidb-server
+For more detail, refer to https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration/
+
+
+
+
+Field
+Description
+
+
+
+
+
+cors
+
+string
+
+
+
+(Optional)
+
+
+
+
+socket
+
+string
+
+
+
+(Optional)
+
+
+
+
+lease
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 45s
+
+
+
+
+run-ddl
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+split-table
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+token-limit
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 1000
+
+
+
+
+oom-action
+
+string
+
+
+
+(Optional)
+Optional: Defaults to log
+
+
+
+
+mem-quota-query
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 34359738368
+
+
+
+
+enable-streaming
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to false
+
+
+
+
+enable-batch-dml
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to false
+
+
+
+
+txn-local-latches
+
+
+TxnLocalLatches
+
+
+
+
+(Optional)
+
+
+
+
+lower-case-table-names
+
+int
+
+
+
+(Optional)
+
+
+
+
+log
+
+
+Log
+
+
+
+
+(Optional)
+
+
+
+
+security
+
+
+Security
+
+
+
+
+(Optional)
+
+
+
+
+status
+
+
+Status
+
+
+
+
+(Optional)
+
+
+
+
+performance
+
+
+Performance
+
+
+
+
+(Optional)
+
+
+
+
+prepared-plan-cache
+
+
+PreparedPlanCache
+
+
+
+
+(Optional)
+
+
+
+
+opentracing
+
+
+OpenTracing
+
+
+
+
+(Optional)
+
+
+
+
+proxy-protocol
+
+
+ProxyProtocol
+
+
+
+
+(Optional)
+
+
+
+
+tikv-client
+
+
+TiKVClient
+
+
+
+
+(Optional)
+
+
+
+
+binlog
+
+
+Binlog
+
+
+
+
+(Optional)
+
+
+
+
+compatible-kill-query
+
+bool
+
+
+
+(Optional)
+
+
+
+
+plugin
+
+
+Plugin
+
+
+
+
+(Optional)
+
+
+
+
+pessimistic-txn
+
+
+PessimisticTxn
+
+
+
+
+(Optional)
+
+
+
+
+check-mb4-value-in-utf8
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+alter-primary-key
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to false
+
+
+
+
+treat-old-version-utf8-as-utf8mb4
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+split-region-max-num
+
+uint64
+
+
+
+(Optional)
+Optional: Defaults to 1000
+
+
+
+
+stmt-summary
+
+
+StmtSummary
+
+
+
+
+(Optional)
+
+
+
+
+repair-mode
+
+bool
+
+
+
+(Optional)
+RepairMode indicates that the TiDB is in the repair mode for table meta.
+
+
+
+
+repair-table-list
+
+[]string
+
+
+
+(Optional)
+
+
+
+
+isolation-read
+
+
+IsolationRead
+
+
+
+
+(Optional)
+IsolationRead indicates that the TiDB reads data from which isolation level(engine and label).
+
+
+
+
+max-server-connections
+
+uint32
+
+
+
+(Optional)
+MaxServerConnections is the maximum permitted number of simultaneous client connections.
+
+
+
+
+new_collations_enabled_on_first_bootstrap
+
+bool
+
+
+
+(Optional)
+NewCollationsEnabledOnFirstBootstrap indicates whether the new collations are enabled; it takes effect only when a TiDB cluster is bootstrapped for the first time.
+
+
+
+
+experimental
+
+
+Experimental
+
+
+
+
+(Optional)
+Experimental contains parameters for experimental features.
+
+
+
+
+enable-dynamic-config
+
+bool
+
+
+
+(Optional)
+EnableDynamicConfig enables the TiDB to fetch configs from PD and update itself during runtime.
+see https://github.com/pingcap/tidb/pull/13660 for more details.
+
+
+
+
+enable-table-lock
+
+bool
+
+
+
+imported from v3.1.0
+optional
+
+
+
+
+delay-clean-table-lock
+
+uint64
+
+
+
+imported from v3.1.0
+optional
+
+
+
+
+TiDBFailureMember
+
+
+(Appears on:
+TiDBStatus )
+
+
+
TiDBFailureMember is the tidb failure member information
+
+
+TiDBMember
+
+
+(Appears on:
+TiDBStatus )
+
+
+
TiDBMember is TiDB member
+
+
+
+
+Field
+Description
+
+
+
+
+
+name
+
+string
+
+
+
+
+
+
+
+health
+
+bool
+
+
+
+
+
+
+
+lastTransitionTime
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+Last time the health transitioned from one to another.
+
+
+
+
+node
+
+string
+
+
+
+Node hosting pod of this TiDB member.
+
+
+
+
+TiDBServiceSpec
+
+
+(Appears on:
+TiDBSpec )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+ServiceSpec
+
+
+ServiceSpec
+
+
+
+
+
+
+
+
+externalTrafficPolicy
+
+
+Kubernetes core/v1.ServiceExternalTrafficPolicyType
+
+
+
+
+(Optional)
+ExternalTrafficPolicy of the service
+Optional: Defaults to omitted
+
+
+
+
+exposeStatus
+
+bool
+
+
+
+(Optional)
+Whether expose the status port
+Optional: Defaults to true
+
+
+
+
+TiDBSlowLogTailerSpec
+
+
+(Appears on:
+TiDBSpec )
+
+
+
TiDBSlowLogTailerSpec represents an optional log tailer sidecar with TiDB
+
+
+
+
+Field
+Description
+
+
+
+
+
+ResourceRequirements
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+
+
+
+(Members of ResourceRequirements
are embedded into this type.)
+
+
+
+
+
+image
+
+string
+
+
+
+Image used for slowlog tailer
+Deprecated, use TidbCluster.HelperImage instead
+
+
+
+
+imagePullPolicy
+
+
+Kubernetes core/v1.PullPolicy
+
+
+
+
+ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present
+Deprecated, use TidbCluster.HelperImagePullPolicy instead
+
+
+
+
+TiDBSpec
+
+
+(Appears on:
+TidbClusterSpec )
+
+
+
TiDBSpec contains details of TiDB members
+
+
+
+
+Field
+Description
+
+
+
+
+
+ComponentSpec
+
+
+ComponentSpec
+
+
+
+
+
+(Members of ComponentSpec
are embedded into this type.)
+
+
+
+
+
+ResourceRequirements
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+
+
+
+(Members of ResourceRequirements
are embedded into this type.)
+
+
+
+
+
+replicas
+
+int32
+
+
+
+The desired ready replicas
+
+
+
+
+baseImage
+
+string
+
+
+
+(Optional)
+TODO: remove optional after defaulting introduced
+Base image of the component, image tag is now allowed during validation
+
+
+
+
+service
+
+
+TiDBServiceSpec
+
+
+
+
+(Optional)
+Service defines a Kubernetes service of TiDB cluster.
+Optional: No kubernetes service will be created by default.
+
+
+
+
+binlogEnabled
+
+bool
+
+
+
+(Optional)
+Whether to enable TiDB Binlog; it is encouraged not to set this field and to rely on the default behavior
+Optional: Defaults to true if PumpSpec is non-nil, otherwise false
+
+
+
+
+maxFailoverCount
+
+int32
+
+
+
+(Optional)
+MaxFailoverCount limit the max replicas could be added in failover, 0 means no failover
+Optional: Defaults to 3
+
+
+
+
+separateSlowLog
+
+bool
+
+
+
+(Optional)
+Whether to output the slow log in a separate sidecar container
+Optional: Defaults to true
+
+
+
+
+tlsClient
+
+
+TiDBTLSClient
+
+
+
+
+(Optional)
+Whether enable the TLS connection between the SQL client and TiDB server
+Optional: Defaults to nil
+
+
+
+
+slowLogTailer
+
+
+TiDBSlowLogTailerSpec
+
+
+
+
+(Optional)
+The spec of the slow log tailer sidecar
+
+
+
+
+plugins
+
+[]string
+
+
+
+(Optional)
+Plugins is a list of plugins that are loaded by TiDB server, empty means plugin disabled
+
+
+
+
+config
+
+
+TiDBConfig
+
+
+
+
+(Optional)
+Config is the Configuration of tidb-servers
+
+
+
+
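+Combining a few of the TiDBSpec fields above, here is a minimal, hedged fragment of spec.tidb in a TidbCluster manifest; the values are illustrative.
+
+```yaml
+# Hypothetical fragment: spec.tidb
+tidb:
+  baseImage: pingcap/tidb      # base image of the component
+  replicas: 2                  # desired ready replicas
+  maxFailoverCount: 3          # defaults to 3
+  separateSlowLog: true        # defaults to true; slow log goes to a sidecar
+  service:
+    type: NodePort             # assumed service type
+    exposeStatus: true         # defaults to true
+  config: {}
+```
+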
+TiDBStatus
+
+
+(Appears on:
+TidbClusterStatus )
+
+
+
TiDBStatus is TiDB status
+
+
+TiDBTLSClient
+
+
+(Appears on:
+TiDBAccessConfig ,
+TiDBSpec )
+
+
+
TiDBTLSClient can enable TLS connection between TiDB server and MySQL client
+
+
+
+
+Field
+Description
+
+
+
+
+
+enabled
+
+bool
+
+
+
+(Optional)
+When enabled, TiDB will accept TLS encrypted connections from MySQL client
+The steps to enable this feature:
+1. Generate a TiDB server-side certificate and a client-side certificate for the TiDB cluster.
+There are multiple ways to generate certificates:
+- user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/enable-tls-clients/
+- use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+- or use cert-manager signed certificates: https://cert-manager.io/
+2. Create a K8s Secret object which contains the TiDB server-side certificate created above.
+The name of this Secret must be: <clusterName>-tidb-server-secret.
+kubectl create secret generic <clusterName>-tidb-server-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+3. Create a K8s Secret object which contains the TiDB client-side certificate created above, which will be used by TiDB Operator.
+The name of this Secret must be: <clusterName>-tidb-client-secret.
+kubectl create secret generic <clusterName>-tidb-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+4. Set Enabled to true
.
+
+
+
+
+tlsSecret
+
+string
+
+
+
+(Optional)
+Specify a secret of client cert for backup/restore
+Optional: Defaults to <clusterName>-tidb-client-secret
+If you want to specify a secret for backup/restore, generate a Secret object according to the third step of the above procedure; the difference is that the Secret name can be freely defined. Then copy the Secret name to TLSSecret.
+This field only works in the backup/restore process.
+
+
+
+
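+A minimal sketch of enabling TLS between MySQL clients and TiDB using the fields above, assuming the block sits under spec.tidb.tlsClient.
+
+```yaml
+# Hypothetical fragment: spec.tidb.tlsClient
+tlsClient:
+  enabled: true                          # accept TLS connections from MySQL clients
+  # tlsSecret: my-client-secret          # optional; defaults to <clusterName>-tidb-client-secret
+```
+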
+TiFlashConfig
+
+
+(Appears on:
+TiFlashSpec )
+
+
+
TiFlashConfig is the configuration of TiFlash.
+
+
+
+
+Field
+Description
+
+
+
+
+
+config
+
+
+CommonConfig
+
+
+
+
+(Optional)
+commonConfig is the Configuration of TiFlash process
+
+
+
+
+TiFlashSpec
+
+
+(Appears on:
+TidbClusterSpec )
+
+
+
TiFlashSpec contains details of TiFlash members
+
+
+
+
+Field
+Description
+
+
+
+
+
+ComponentSpec
+
+
+ComponentSpec
+
+
+
+
+
+(Members of ComponentSpec
are embedded into this type.)
+
+
+
+
+
+ResourceRequirements
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+
+
+
+(Members of ResourceRequirements
are embedded into this type.)
+
+
+
+
+
+serviceAccount
+
+string
+
+
+
+Specify a Service Account for TiFlash
+
+
+
+
+replicas
+
+int32
+
+
+
+The desired ready replicas
+
+
+
+
+baseImage
+
+string
+
+
+
+(Optional)
+Base image of the component, image tag is now allowed during validation
+
+
+
+
+privileged
+
+bool
+
+
+
+(Optional)
+Whether to create the TiFlash container in privileged mode; it is highly discouraged to enable this in
+critical environments.
+Optional: defaults to false
+
+
+
+
+maxFailoverCount
+
+int32
+
+
+
+(Optional)
+MaxFailoverCount limit the max replicas could be added in failover, 0 means no failover
+Optional: Defaults to 3
+
+
+
+
+storageClaims
+
+
+[]StorageClaim
+
+
+
+
+The persistent volume claims of the TiFlash data storages.
+TiFlash supports multiple disks.
+
+
+
+
+config
+
+
+TiFlashConfig
+
+
+
+
+(Optional)
+Config is the Configuration of TiFlash
+
+
+
+
+logTailer
+
+
+LogTailerSpec
+
+
+
+
+(Optional)
+LogTailer is the configurations of the log tailers for TiFlash
+
+
+
+
+TiKVBlockCacheConfig
+
+
+(Appears on:
+TiKVStorageConfig )
+
+
+
TiKVBlockCacheConfig is the config of a block cache
+
+
+
+
+Field
+Description
+
+
+
+
+
+shared
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+capacity
+
+string
+
+
+
+(Optional)
+
+
+
+
+num-shard-bits
+
+int64
+
+
+
+(Optional)
+
+
+
+
+strict-capacity-limit
+
+bool
+
+
+
+(Optional)
+
+
+
+
+high-pri-pool-ratio
+
+float64
+
+
+
+(Optional)
+
+
+
+
+memory-allocator
+
+string
+
+
+
+(Optional)
+
+
+
+
+TiKVCfConfig
+
+
+(Appears on:
+TiKVDbConfig ,
+TiKVRaftDBConfig )
+
+
+
TiKVCfConfig is the config of a cf
+
+
+
+
+Field
+Description
+
+
+
+
+
+block-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+block-cache-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+disable-block-cache
+
+bool
+
+
+
+(Optional)
+
+
+
+
+cache-index-and-filter-blocks
+
+bool
+
+
+
+(Optional)
+
+
+
+
+pin-l0-filter-and-index-blocks
+
+bool
+
+
+
+(Optional)
+
+
+
+
+use-bloom-filter
+
+bool
+
+
+
+(Optional)
+
+
+
+
+optimize-filters-for-hits
+
+bool
+
+
+
+(Optional)
+
+
+
+
+whole-key-filtering
+
+bool
+
+
+
+(Optional)
+
+
+
+
+bloom-filter-bits-per-key
+
+int64
+
+
+
+(Optional)
+
+
+
+
+block-based-bloom-filter
+
+bool
+
+
+
+(Optional)
+
+
+
+
+read-amp-bytes-per-bit
+
+int64
+
+
+
+(Optional)
+
+
+
+
+compression-per-level
+
+[]string
+
+
+
+(Optional)
+
+
+
+
+write-buffer-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+max-write-buffer-number
+
+int64
+
+
+
+(Optional)
+
+
+
+
+min-write-buffer-number-to-merge
+
+int64
+
+
+
+(Optional)
+
+
+
+
+max-bytes-for-level-base
+
+string
+
+
+
+(Optional)
+
+
+
+
+target-file-size-base
+
+string
+
+
+
+(Optional)
+
+
+
+
+level0-file-num-compaction-trigger
+
+int64
+
+
+
+(Optional)
+
+
+
+
+level0-slowdown-writes-trigger
+
+int64
+
+
+
+(Optional)
+
+
+
+
+level0-stop-writes-trigger
+
+int64
+
+
+
+(Optional)
+
+
+
+
+max-compaction-bytes
+
+string
+
+
+
+(Optional)
+
+
+
+
+compaction-pri
+
+int64
+
+
+
+(Optional)
+
+
+
+
+dynamic-level-bytes
+
+bool
+
+
+
+(Optional)
+
+
+
+
+num-levels
+
+int64
+
+
+
+(Optional)
+
+
+
+
+max-bytes-for-level-multiplier
+
+int64
+
+
+
+(Optional)
+
+
+
+
+compaction-style
+
+int64
+
+
+
+(Optional)
+
+
+
+
+disable-auto-compactions
+
+bool
+
+
+
+(Optional)
+
+
+
+
+soft-pending-compaction-bytes-limit
+
+string
+
+
+
+(Optional)
+
+
+
+
+hard-pending-compaction-bytes-limit
+
+string
+
+
+
+(Optional)
+
+
+
+
+force-consistency-checks
+
+bool
+
+
+
+(Optional)
+
+
+
+
+prop-size-index-distance
+
+int64
+
+
+
+(Optional)
+
+
+
+
+prop-keys-index-distance
+
+int64
+
+
+
+(Optional)
+
+
+
+
+enable-doubly-skiplist
+
+bool
+
+
+
+(Optional)
+
+
+
+
+titan
+
+
+TiKVTitanCfConfig
+
+
+
+
+(Optional)
+
+
+
+
+TiKVClient
+
+
+(Appears on:
+TiDBConfig )
+
+
+
TiKVClient is the config for tikv client.
+
+
+
+
+Field
+Description
+
+
+
+
+
+grpc-connection-count
+
+uint
+
+
+
+(Optional)
+GrpcConnectionCount is the max gRPC connections that will be established
+with each tikv-server.
+Optional: Defaults to 16
+
+
+
+
+grpc-keepalive-time
+
+uint
+
+
+
+(Optional)
+After a duration of this time in seconds if the client doesn’t see any activity it pings
+the server to see if the transport is still alive.
+Optional: Defaults to 10
+
+
+
+
+grpc-keepalive-timeout
+
+uint
+
+
+
+(Optional)
+After having pinged for keepalive check, the client waits for a duration of Timeout in seconds
+and if no activity is seen even after that the connection is closed.
+Optional: Defaults to 3
+
+
+
+
+commit-timeout
+
+string
+
+
+
+(Optional)
+CommitTimeout is the max time which command ‘commit’ will wait.
+Optional: Defaults to 41s
+
+
+
+
+max-txn-time-use
+
+uint
+
+
+
+(Optional)
+MaxTxnTimeUse is the max time a Txn may use (in seconds) from its startTS to commitTS.
+Optional: Defaults to 590
+
+
+
+
+max-batch-size
+
+uint
+
+
+
+(Optional)
+MaxBatchSize is the max batch size when calling batch commands API.
+Optional: Defaults to 128
+
+
+
+
+overload-threshold
+
+uint
+
+
+
+(Optional)
+If TiKV load is greater than this, TiDB will wait for a while to avoid little batch.
+Optional: Defaults to 200
+
+
+
+
+max-batch-wait-time
+
+time.Duration
+
+
+
+(Optional)
+MaxBatchWaitTime in nanosecond is the max wait time for batch.
+Optional: Defaults to 0
+
+
+
+
+batch-wait-size
+
+uint
+
+
+
+(Optional)
+BatchWaitSize is the max wait size for batch.
+Optional: Defaults to 8
+
+
+
+
+region-cache-ttl
+
+uint
+
+
+
+(Optional)
+If a Region has not been accessed for more than the given duration (in seconds), it
+will be reloaded from the PD.
+Optional: Defaults to 600
+
+
+
+
+store-limit
+
+int64
+
+
+
+(Optional)
+If a store has been up to the limit, it will return error for successive request to
+prevent the store occupying too much token in dispatching level.
+Optional: Defaults to 0
+
+
+
+
+copr-cache
+
+
+CoprocessorCache
+
+
+
+
+(Optional)
+
+
+
+
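+A minimal sketch of a tikv-client block, assuming it is set under spec.tidb.config as listed in the TiDBConfig table; the values simply restate the documented defaults.
+
+```yaml
+# Hypothetical fragment: spec.tidb.config.tikv-client
+tikv-client:
+  grpc-connection-count: 16    # defaults to 16
+  commit-timeout: "41s"        # defaults to 41s
+  region-cache-ttl: 600        # seconds; defaults to 600
+```
+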
+TiKVConfig
+
+
+(Appears on:
+TiKVSpec )
+
+
+
TiKVConfig is the configuration of TiKV.
+
+
+TiKVCoprocessorConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
TiKVCoprocessorConfig is the configuration of TiKV Coprocessor component.
+
+
+
+
+Field
+Description
+
+
+
+
+
+split-region-on-table
+
+bool
+
+
+
+When it is set to true
, TiKV will try to split a Region with table prefix if that Region
+crosses tables.
+It is recommended to turn off this option if there will be a large number of tables created.
+Optional: Defaults to false
+optional
+
+
+
+
+batch-split-limit
+
+int64
+
+
+
+One split check produces several split keys in batch. This config limits the number of produced
+split keys in one batch.
+optional
+
+
+
+
+region-max-size
+
+string
+
+
+
+When Region [a,e) size exceeds region-max-size
, it will be split into several Regions [a,b),
+[b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be region-split-size
(or a
+little larger). See also: region-split-size
+Optional: Defaults to 144MB
+optional
+
+
+
+
+region-split-size
+
+string
+
+
+
+When Region [a,e) size exceeds region-max-size
, it will be split into several Regions [a,b),
+[b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be region-split-size
(or a
+little larger). See also: region-max-size
+Optional: Defaults to 96MB
+optional
+
+
+
+
+region-max-keys
+
+int64
+
+
+
+When the number of keys in Region [a,e) exceeds the region-max-keys
, it will be split into
+several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be
+region-split-keys
. See also: region-split-keys
+Optional: Defaults to 1440000
+optional
+
+
+
+
+region-split-keys
+
+int64
+
+
+
+When the number of keys in Region [a,e) exceeds the region-max-keys
, it will be split into
+several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be
+region-split-keys
. See also: region-max-keys
+Optional: Defaults to 960000
+optional
+
+
+
+
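+To make the split thresholds above concrete, here is a hedged sketch of a coprocessor block; the coprocessor key name and its placement under spec.tikv.config are assumptions, and the values restate the documented defaults.
+
+```yaml
+# Hypothetical fragment: spec.tikv.config.coprocessor (key name assumed)
+coprocessor:
+  split-region-on-table: false   # defaults to false
+  region-max-size: "144MB"       # split a Region once it grows past this size
+  region-split-size: "96MB"      # target size of Regions produced by a split
+  region-max-keys: 1440000       # defaults to 1440000
+  region-split-keys: 960000      # defaults to 960000
+```
+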
+TiKVCoprocessorReadPoolConfig
+
+
+(Appears on:
+TiKVReadPoolConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+high-concurrency
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 8
+
+
+
+
+normal-concurrency
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 8
+
+
+
+
+low-concurrency
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 8
+
+
+
+
+max-tasks-per-worker-high
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2000
+
+
+
+
+max-tasks-per-worker-normal
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2000
+
+
+
+
+max-tasks-per-worker-low
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2000
+
+
+
+
+stack-size
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 10MB
+
+
+
+
+TiKVDbConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
TiKVDbConfig is the rocksdb config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+wal-recovery-mode
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2
+
+
+
+
+wal-ttl-seconds
+
+int64
+
+
+
+(Optional)
+
+
+
+
+wal-size-limit
+
+string
+
+
+
+(Optional)
+
+
+
+
+max-total-wal-size
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 4GB
+
+
+
+
+max-background-jobs
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 8
+
+
+
+
+max-manifest-file-size
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 128MB
+
+
+
+
+create-if-missing
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+max-open-files
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 40960
+
+
+
+
+enable-statistics
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+stats-dump-period
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 10m
+
+
+
+
+compaction-readahead-size
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 0
+
+
+
+
+info-log-max-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+info-log-roll-time
+
+string
+
+
+
+(Optional)
+
+
+
+
+info-log-keep-log-file-num
+
+int64
+
+
+
+(Optional)
+
+
+
+
+info-log-dir
+
+string
+
+
+
+(Optional)
+
+
+
+
+rate-bytes-per-sec
+
+string
+
+
+
+(Optional)
+
+
+
+
+rate-limiter-mode
+
+int64
+
+
+
+(Optional)
+
+
+
+
+auto-tuned
+
+bool
+
+
+
+(Optional)
+
+
+
+
+bytes-per-sync
+
+string
+
+
+
+(Optional)
+
+
+
+
+wal-bytes-per-sync
+
+string
+
+
+
+(Optional)
+
+
+
+
+max-sub-compactions
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 3
+
+
+
+
+writable-file-max-buffer-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+use-direct-io-for-flush-and-compaction
+
+bool
+
+
+
+(Optional)
+
+
+
+
+enable-pipelined-write
+
+bool
+
+
+
+(Optional)
+
+
+
+
+defaultcf
+
+
+TiKVCfConfig
+
+
+
+
+(Optional)
+
+
+
+
+writecf
+
+
+TiKVCfConfig
+
+
+
+
+(Optional)
+
+
+
+
+lockcf
+
+
+TiKVCfConfig
+
+
+
+
+(Optional)
+
+
+
+
+raftcf
+
+
+TiKVCfConfig
+
+
+
+
+(Optional)
+
+
+
+
+titan
+
+
+TiKVTitanDBConfig
+
+
+
+
+(Optional)
+
+
+
+
+TiKVEncryptionConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+method
+
+string
+
+
+
+Encryption method; the data key is used to encrypt raw RocksDB data.
+Possible values: plaintext, aes128-ctr, aes192-ctr, aes256-ctr
+Optional: Defaults to plaintext
+optional
+
+
+
+
+data-key-rotation-period
+
+string
+
+
+
+The frequency of data key rotation; it is managed by TiKV.
+Optional: Defaults to 7d
+optional
+
+
+
+
+master-key
+
+
+TiKVMasterKeyConfig
+
+
+
+
+Master key config
+
+
+
+
+previous-master-key
+
+
+TiKVMasterKeyConfig
+
+
+
+
+Previous master key config
+It is used in master key rotation: the data key should be decrypted by the previous master key and then encrypted by the new master key.
+
+
+
+
+TiKVFailureStore
+
+
+(Appears on:
+TiKVStatus )
+
+
+
TiKVFailureStore is the tikv failure store information
+
+
+
+
+Field
+Description
+
+
+
+
+
+podName
+
+string
+
+
+
+
+
+
+
+storeID
+
+string
+
+
+
+
+
+
+
+createdAt
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+
+
+
+
+TiKVGCConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+ batch-keys
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 512
+
+
+
+
+ max-write-bytes-per-sec
+
+string
+
+
+
+(Optional)
+
+
+
+
+TiKVImportConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+import-dir
+
+string
+
+
+
+(Optional)
+
+
+
+
+num-threads
+
+int64
+
+
+
+(Optional)
+
+
+
+
+num-import-jobs
+
+int64
+
+
+
+(Optional)
+
+
+
+
+num-import-sst-jobs
+
+int64
+
+
+
+(Optional)
+
+
+
+
+max-prepare-duration
+
+string
+
+
+
+(Optional)
+
+
+
+
+region-split-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+stream-channel-window
+
+int64
+
+
+
+(Optional)
+
+
+
+
+max-open-engines
+
+int64
+
+
+
+(Optional)
+
+
+
+
+upload-speed-limit
+
+string
+
+
+
+(Optional)
+
+
+
+
+TiKVMasterKeyConfig
+
+
+(Appears on:
+TiKVEncryptionConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+type
+
+string
+
+
+
+Use KMS encryption or file encryption; possible values: kms, file.
+If set to kms, MasterKeyKMSConfig should be filled; if set to file, MasterKeyFileConfig should be filled.
+optional
+
+
+
+
+MasterKeyFileConfig
+
+
+MasterKeyFileConfig
+
+
+
+
+
+(Members of MasterKeyFileConfig
are embedded into this type.)
+
+Master key file config
+If the type is set to file, this config should be filled.
+
+
+
+
+MasterKeyKMSConfig
+
+
+MasterKeyKMSConfig
+
+
+
+
+
+(Members of MasterKeyKMSConfig
are embedded into this type.)
+
+Master key KMS config
+If the type is set to kms, this config should be filled.
+
+
+
+
+TiKVPDConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+endpoints
+
+[]string
+
+
+
+(Optional)
+The PD endpoints for the client.
+Default is empty.
+
+
+
+
+retry-interval
+
+string
+
+
+
+(Optional)
+The interval at which to retry a PD connection initialization.
+Default is 300ms.
+Optional: Defaults to 300ms
+
+
+
+
+retry-max-count
+
+int64
+
+
+
+(Optional)
+The maximum number of times to retry a PD connection initialization.
+Default is isize::MAX, represented by -1.
+Optional: Defaults to -1
+
+
+
+
+retry-log-every
+
+int64
+
+
+
+(Optional)
+If the client observes the same error message on retry, it can repeat the message only
+every n
times.
+Default is 10. Set to 1 to disable this feature.
+Optional: Defaults to 10
+
+
+
+
+TiKVRaftDBConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+wal-recovery-mode
+
+string
+
+
+
+(Optional)
+
+
+
+
+wal-dir
+
+string
+
+
+
+(Optional)
+
+
+
+
+wal-ttl-seconds
+
+int64
+
+
+
+(Optional)
+
+
+
+
+wal-size-limit
+
+string
+
+
+
+(Optional)
+
+
+
+
+max-total-wal-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+max-background-jobs
+
+int64
+
+
+
+(Optional)
+
+
+
+
+max-manifest-file-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+create-if-missing
+
+bool
+
+
+
+(Optional)
+
+
+
+
+max-open-files
+
+int64
+
+
+
+(Optional)
+
+
+
+
+enable-statistics
+
+bool
+
+
+
+(Optional)
+
+
+
+
+stats-dump-period
+
+string
+
+
+
+(Optional)
+
+
+
+
+compaction-readahead-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+info-log-max-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+info-log-roll-time
+
+string
+
+
+
+(Optional)
+
+
+
+
+info-log-keep-log-file-num
+
+int64
+
+
+
+(Optional)
+
+
+
+
+info-log-dir
+
+string
+
+
+
+(Optional)
+
+
+
+
+max-sub-compactions
+
+int64
+
+
+
+(Optional)
+
+
+
+
+writable-file-max-buffer-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+use-direct-io-for-flush-and-compaction
+
+bool
+
+
+
+(Optional)
+
+
+
+
+enable-pipelined-write
+
+bool
+
+
+
+(Optional)
+
+
+
+
+allow-concurrent-memtable-write
+
+bool
+
+
+
+(Optional)
+
+
+
+
+bytes-per-sync
+
+string
+
+
+
+(Optional)
+
+
+
+
+wal-bytes-per-sync
+
+string
+
+
+
+(Optional)
+
+
+
+
+defaultcf
+
+
+TiKVCfConfig
+
+
+
+
+(Optional)
+
+
+
+
+TiKVRaftstoreConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
TiKVRaftstoreConfig is the configuration of TiKV raftstore component.
+
+
+
+
+Field
+Description
+
+
+
+
+
+sync-log
+
+bool
+
+
+
+(Optional)
+true for high reliability; prevents data loss on power failure.
+Optional: Defaults to true
+
+
+
+
+prevote
+
+bool
+
+
+
+(Optional)
+Optional: Defaults to true
+
+
+
+
+raft-base-tick-interval
+
+string
+
+
+
+(Optional)
+raft-base-tick-interval is a base tick interval (ms).
+
+
+
+
+raft-heartbeat-ticks
+
+int64
+
+
+
+(Optional)
+
+
+
+
+raft-election-timeout-ticks
+
+int64
+
+
+
+(Optional)
+
+
+
+
+raft-entry-max-size
+
+string
+
+
+
+(Optional)
+When an entry exceeds the max size, reject proposing it.
+Optional: Defaults to 8MB
+
+
+
+
+raft-log-gc-tick-interval
+
+string
+
+
+
+(Optional)
+Interval to GC unnecessary raft logs (ms).
+Optional: Defaults to 10s
+
+
+
+
+raft-log-gc-threshold
+
+int64
+
+
+
+(Optional)
+A threshold to GC stale raft logs; must be >= 1.
+Optional: Defaults to 50
+
+
+
+
+raft-log-gc-count-limit
+
+int64
+
+
+
+(Optional)
+When the entry count exceeds this value, GC will be forcibly triggered.
+Optional: Defaults to 72000
+
+
+
+
+raft-log-gc-size-limit
+
+string
+
+
+
+(Optional)
+When the approximate size of raft log entries exceeds this value,
+GC will be forcibly triggered.
+Optional: Defaults to 72MB
+
+
+
+
+raft-entry-cache-life-time
+
+string
+
+
+
+(Optional)
+When a peer has not been responding for this time, the leader will not keep an entry cache for it.
+
+
+
+
+raft-reject-transfer-leader-duration
+
+string
+
+
+
+(Optional)
+When a peer is newly added, reject transferring leader to the peer for a while.
+
+
+
+
+split-region-check-tick-interval
+
+string
+
+
+
+(Optional)
+Interval (ms) to check whether a region needs to be split.
+Optional: Defaults to 10s
+
+
+
+
+region-split-check-diff
+
+string
+
+
+
+(Optional)
+When the size change of a region exceeds the diff since the last check,
+it will be checked again to decide whether it should be split.
+Optional: Defaults to 6MB
+
+
+
+
+region-compact-check-interval
+
+string
+
+
+
+(Optional)
+Interval (ms) to check whether to start compaction for a region.
+Optional: Defaults to 5m
+
+
+
+
+clean-stale-peer-delay
+
+string
+
+
+
+(Optional)
+Delay time before deleting a stale peer.
+Optional: Defaults to 10m
+
+
+
+
+region-compact-check-step
+
+int64
+
+
+
+(Optional)
+Number of regions checked in each round of checking.
+Optional: Defaults to 100
+
+
+
+
+region-compact-min-tombstones
+
+int64
+
+
+
+(Optional)
+Minimum number of tombstones to trigger manual compaction.
+Optional: Defaults to 10000
+
+
+
+
+region-compact-tombstones-percent
+
+int64
+
+
+
+(Optional)
+Minimum percentage of tombstones to trigger manual compaction.
+Should be between 1 and 100.
+Optional: Defaults to 30
+
+
+
+
+pd-heartbeat-tick-interval
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 60s
+
+
+
+
+pd-store-heartbeat-tick-interval
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 10s
+
+
+
+
+snap-mgr-gc-tick-interval
+
+string
+
+
+
+(Optional)
+
+
+
+
+snap-gc-timeout
+
+string
+
+
+
+(Optional)
+
+
+
+
+lock-cf-compact-interval
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 10m
+
+
+
+
+lock-cf-compact-bytes-threshold
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 256MB
+
+
+
+
+notify-capacity
+
+int64
+
+
+
+(Optional)
+
+
+
+
+messages-per-tick
+
+int64
+
+
+
+(Optional)
+
+
+
+
+max-peer-down-duration
+
+string
+
+
+
+(Optional)
+When a peer is not active for max-peer-down-duration,
+the peer is considered to be down and is reported to PD.
+Optional: Defaults to 5m
+
+
+
+
+max-leader-missing-duration
+
+string
+
+
+
+(Optional)
+If the leader of a peer is missing for longer than max-leader-missing-duration,
+the peer asks PD to confirm whether it is valid in any region.
+If the peer is stale and is not valid in any region, it will destroy itself.
+
+
+
+
+abnormal-leader-missing-duration
+
+string
+
+
+
+(Optional)
+Similar to max-leader-missing-duration, but instead it logs warnings and
+tries to alert monitoring systems, if there are any.
+
+
+
+
+peer-stale-state-check-interval
+
+string
+
+
+
+(Optional)
+
+
+
+
+leader-transfer-max-log-lag
+
+int64
+
+
+
+(Optional)
+
+
+
+
+snap-apply-batch-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+consistency-check-interval
+
+string
+
+
+
+(Optional)
+Interval (ms) to check whether region data is consistent.
+Optional: Defaults to 0
+
+
+
+
+report-region-flow-interval
+
+string
+
+
+
+(Optional)
+
+
+
+
+raft-store-max-leader-lease
+
+string
+
+
+
+(Optional)
+The lease provided by a successfully proposed and applied entry.
+
+
+
+
+right-derive-when-split
+
+bool
+
+
+
+(Optional)
+The right region derives the origin region ID when splitting.
+
+
+
+
+allow-remove-leader
+
+bool
+
+
+
+(Optional)
+
+
+
+
+merge-max-log-gap
+
+int64
+
+
+
+(Optional)
+Max log gap allowed to propose a merge.
+
+
+
+
+merge-check-tick-interval
+
+string
+
+
+
+(Optional)
+Interval to re-propose a merge.
+
+
+
+
+use-delete-range
+
+bool
+
+
+
+(Optional)
+
+
+
+
+cleanup-import-sst-interval
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 10m
+
+
+
+
+apply-max-batch-size
+
+int64
+
+
+
+(Optional)
+
+
+
+
+apply-pool-size
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2
+
+
+
+
+store-max-batch-size
+
+int64
+
+
+
+(Optional)
+
+
+
+
+store-pool-size
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2
+
+
+
+
+hibernate-regions
+
+bool
+
+
+
+(Optional)
+
+
+
+
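+As a hedged illustration, a few of the raftstore options above could be overridden through the TidbCluster CR roughly as follows; the nesting under spec.tikv.config is an assumption based on the TiKV configuration layout, and the values are illustrative only.
+
+```yaml
+# Hypothetical sketch: tuning a couple of raftstore options via the TidbCluster CR.
+spec:
+  tikv:
+    config:
+      raftstore:
+        sync-log: true                  # high reliability; prevents data loss on power failure
+        hibernate-regions: true         # reduce raft tick overhead for idle regions
+        raft-log-gc-size-limit: "72MB"  # force GC when raft log entries exceed this size
+```
+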
+TiKVReadPoolConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
+
+TiKVSecurityConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+ca-path
+
+string
+
+
+
+(Optional)
+
+
+
+
+cert-path
+
+string
+
+
+
+(Optional)
+
+
+
+
+key-path
+
+string
+
+
+
+(Optional)
+
+
+
+
+cert-allowed-cn
+
+[]string
+
+
+
+(Optional)
+CertAllowedCN is the Common Name that is allowed
+
+
+
+
+override-ssl-target
+
+string
+
+
+
+(Optional)
+
+
+
+
+cipher-file
+
+string
+
+
+
+(Optional)
+
+
+
+
+TiKVServerConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
TiKVServerConfig is the configuration of TiKV server.
+
+
+
+
+Field
+Description
+
+
+
+
+
+status-thread-pool-size
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 1
+
+
+
+
+grpc-compression-type
+
+string
+
+
+
+(Optional)
+Optional: Defaults to none
+
+
+
+
+grpc-concurrency
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 4
+
+
+
+
+grpc-concurrent-stream
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 1024
+
+
+
+
+grpc-memory-pool-quota
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 32G
+
+
+
+
+grpc-raft-conn-num
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 10
+
+
+
+
+grpc-stream-initial-window-size
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 2MB
+
+
+
+
+grpc-keepalive-time
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 10s
+
+
+
+
+grpc-keepalive-timeout
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 3s
+
+
+
+
+concurrent-send-snap-limit
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 32
+
+
+
+
+concurrent-recv-snap-limit
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 32
+
+
+
+
+end-point-recursion-limit
+
+uint
+
+
+
+(Optional)
+Optional: Defaults to 1000
+
+
+
+
+end-point-stream-channel-size
+
+uint
+
+
+
+(Optional)
+
+
+
+
+end-point-batch-row-limit
+
+uint
+
+
+
+(Optional)
+
+
+
+
+end-point-stream-batch-row-limit
+
+uint
+
+
+
+(Optional)
+
+
+
+
+end-point-enable-batch-if-possible
+
+uint
+
+
+
+(Optional)
+
+
+
+
+end-point-request-max-handle-duration
+
+string
+
+
+
+(Optional)
+
+
+
+
+snap-max-write-bytes-per-sec
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 100MB
+
+
+
+
+snap-max-total-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+stats-concurrency
+
+uint
+
+
+
+(Optional)
+
+
+
+
+heavy-load-threshold
+
+uint
+
+
+
+(Optional)
+
+
+
+
+heavy-load-wait-duration
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 60s
+
+
+
+
+labels
+
+map[string]string
+
+
+
+(Optional)
+
+
+
+
+TiKVSpec
+
+
+(Appears on:
+TidbClusterSpec )
+
+
+
TiKVSpec contains details of TiKV members
+
+
+
+
+Field
+Description
+
+
+
+
+
+ComponentSpec
+
+
+ComponentSpec
+
+
+
+
+
+(Members of ComponentSpec
are embedded into this type.)
+
+
+
+
+
+ResourceRequirements
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+
+
+
+(Members of ResourceRequirements
are embedded into this type.)
+
+
+
+
+
+serviceAccount
+
+string
+
+
+
+Specify a Service Account for tikv
+
+
+
+
+replicas
+
+int32
+
+
+
+The desired ready replicas
+
+
+
+
+baseImage
+
+string
+
+
+
+(Optional)
+TODO: remove optional after defaulting introduced
+Base image of the component; image tag is not allowed during validation
+
+
+
+
+privileged
+
+bool
+
+
+
+(Optional)
+Whether to create the TiKV container in privileged mode; it is highly discouraged to enable this in
+critical environments.
+Optional: defaults to false
+
+
+
+
+maxFailoverCount
+
+int32
+
+
+
+(Optional)
+MaxFailoverCount limits the max replicas that could be added in failover; 0 means no failover
+Optional: Defaults to 3
+
+
+
+
+storageClassName
+
+string
+
+
+
+(Optional)
+The storageClassName of the persistent volume for TiKV data storage.
+Defaults to Kubernetes default storage class.
+
+
+
+
+config
+
+
+TiKVConfig
+
+
+
+
+(Optional)
+Config is the Configuration of tikv-servers
+
+
+
+
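+For orientation, a minimal tikv section using the fields above might look roughly like this; the values are illustrative only and should be sized for your environment.
+
+```yaml
+# Illustrative sketch of a TiKVSpec; the image tag is supplied by spec.version.
+spec:
+  tikv:
+    baseImage: pingcap/tikv
+    replicas: 3
+    maxFailoverCount: 3
+    storageClassName: standard   # falls back to the default StorageClass if omitted
+    requests:
+      storage: "10Gi"
+    config: {}
+```
+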
+TiKVStatus
+
+
+(Appears on:
+TidbClusterStatus )
+
+
+
TiKVStatus is TiKV status
+
+
+TiKVStorageConfig
+
+
+(Appears on:
+TiKVConfig )
+
+
+
TiKVStorageConfig is the config of storage
+
+
+
+
+Field
+Description
+
+
+
+
+
+max-key-size
+
+int64
+
+
+
+(Optional)
+
+
+
+
+scheduler-notify-capacity
+
+int64
+
+
+
+(Optional)
+
+
+
+
+scheduler-concurrency
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2048000
+
+
+
+
+scheduler-worker-pool-size
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 4
+
+
+
+
+scheduler-pending-write-threshold
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 100MB
+
+
+
+
+block-cache
+
+
+TiKVBlockCacheConfig
+
+
+
+
+(Optional)
+
+
+
+
+TiKVStorageReadPoolConfig
+
+
+(Appears on:
+TiKVReadPoolConfig )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+high-concurrency
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 4
+
+
+
+
+normal-concurrency
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 4
+
+
+
+
+low-concurrency
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 4
+
+
+
+
+max-tasks-per-worker-high
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2000
+
+
+
+
+max-tasks-per-worker-normal
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2000
+
+
+
+
+max-tasks-per-worker-low
+
+int64
+
+
+
+(Optional)
+Optional: Defaults to 2000
+
+
+
+
+stack-size
+
+string
+
+
+
+(Optional)
+Optional: Defaults to 10MB
+
+
+
+
+TiKVStore
+
+
+(Appears on:
+TiKVStatus )
+
+
+
TiKVStore's state is either Up/Down/Offline/Tombstone
+
+
+
+
+Field
+Description
+
+
+
+
+
+id
+
+string
+
+
+
+store id is also uint64; for the same reason as pd id, we store it as a string
+
+
+
+
+podName
+
+string
+
+
+
+
+
+
+
+ip
+
+string
+
+
+
+
+
+
+
+leaderCount
+
+int32
+
+
+
+
+
+
+
+state
+
+string
+
+
+
+
+
+
+
+lastHeartbeatTime
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+
+
+
+
+lastTransitionTime
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+Last time the health status transitioned from one state to another.
+
+
+
+
+TiKVTitanCfConfig
+
+
+(Appears on:
+TiKVCfConfig )
+
+
+
TiKVTitanCfConfig is the Titan CF config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+min-blob-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+blob-file-compression
+
+string
+
+
+
+(Optional)
+
+
+
+
+blob-cache-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+min-gc-batch-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+max-gc-batch-size
+
+string
+
+
+
+(Optional)
+
+
+
+
+discardable-ratio
+
+float64
+
+
+
+(Optional)
+
+
+
+
+sample-ratio
+
+float64
+
+
+
+(Optional)
+
+
+
+
+merge-small-file-threshold
+
+string
+
+
+
+(Optional)
+
+
+
+
+blob-run-mode
+
+string
+
+
+
+(Optional)
+
+
+
+
+TiKVTitanDBConfig
+
+
+(Appears on:
+TiKVDbConfig )
+
+
+
TiKVTitanDBConfig is the config of a Titan DB.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enabled
+
+bool
+
+
+
+(Optional)
+
+
+
+
+dirname
+
+string
+
+
+
+(Optional)
+
+
+
+
+disable-gc
+
+bool
+
+
+
+(Optional)
+
+
+
+
+max-background-gc
+
+int64
+
+
+
+(Optional)
+
+
+
+
+purge-obsolete-files-period
+
+string
+
+
+
+(Optional)
+The value of this field will be truncated to seconds.
+
+
+
+
+TidbAutoScalerSpec
+
+
+(Appears on:
+TidbClusterAutoScalerSpec )
+
+
+
TidbAutoScalerSpec describes the spec for tidb auto-scaling
+
+
+
+
+Field
+Description
+
+
+
+
+
+BasicAutoScalerSpec
+
+
+BasicAutoScalerSpec
+
+
+
+
+
+(Members of BasicAutoScalerSpec
are embedded into this type.)
+
+
+
+
+
+TidbAutoScalerStatus
+
+
+(Appears on:
+TidbClusterAutoSclaerStatus )
+
+
+
TidbAutoScalerStatus describes the auto-scaling status of tidb
+
+
+
+
+Field
+Description
+
+
+
+
+
+BasicAutoScalerStatus
+
+
+BasicAutoScalerStatus
+
+
+
+
+
+(Members of BasicAutoScalerStatus
are embedded into this type.)
+
+
+
+
+
+TidbClusterAutoScalerSpec
+
+
+(Appears on:
+TidbClusterAutoScaler )
+
+
+
TidbClusterAutoScalerSpec describes the state of the TidbClusterAutoScaler
+
+
+
+
+Field
+Description
+
+
+
+
+
+cluster
+
+
+TidbClusterRef
+
+
+
+
+TidbClusterRef describes the target TidbCluster
+
+
+
+
+metricsUrl
+
+string
+
+
+
+(Optional)
+We use Prometheus to fetch the metrics until PD can provide them.
+MetricsUrl represents the URL used to fetch the metrics info
+
+
+
+
+monitor
+
+
+TidbMonitorRef
+
+
+
+
+(Optional)
+TidbMonitorRef describes the target TidbMonitor; when MetricsUrl and Monitor are both set,
+the Operator will use MetricsUrl
+
+
+
+
+tikv
+
+
+TikvAutoScalerSpec
+
+
+
+
+(Optional)
+TiKV represents the auto-scaling spec for tikv
+
+
+
+
+tidb
+
+
+TidbAutoScalerSpec
+
+
+
+
+(Optional)
+TiDB represents the auto-scaling spec for tidb
+
+
+
+
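+Putting the fields above together, a minimal TidbClusterAutoScaler might be sketched as follows; names, the monitor reference, and replica bounds are illustrative, and the min/max replica fields come from the embedded BasicAutoScalerSpec.
+
+```yaml
+# Illustrative sketch; adjust names and bounds for your cluster.
+apiVersion: pingcap.com/v1alpha1
+kind: TidbClusterAutoScaler
+metadata:
+  name: demo
+spec:
+  cluster:
+    name: demo        # target TidbCluster
+  monitor:
+    name: demo        # target TidbMonitor used to fetch metrics
+  tikv:
+    minReplicas: 3
+    maxReplicas: 4
+  tidb:
+    minReplicas: 2
+    maxReplicas: 3
+```
+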
+TidbClusterAutoSclaerStatus
+
+
+(Appears on:
+TidbClusterAutoScaler )
+
+
+
TidbClusterAutoSclaerStatus describes the whole status
+
+
+
+
+Field
+Description
+
+
+
+
+
+tikv
+
+
+TikvAutoScalerStatus
+
+
+
+
+(Optional)
+Tikv describes the status for the tikv in the last auto-scaling reconciliation
+
+
+
+
+tidb
+
+
+TidbAutoScalerStatus
+
+
+
+
+(Optional)
+Tidb describes the status for the tidb in the last auto-scaling reconciliation
+
+
+
+
+TidbClusterRef
+
+
+(Appears on:
+TidbClusterAutoScalerSpec ,
+TidbInitializerSpec ,
+TidbMonitorSpec )
+
+
+
TidbClusterRef is a reference to a TidbCluster
+
+
+
+
+Field
+Description
+
+
+
+
+
+namespace
+
+string
+
+
+
+(Optional)
+Namespace is the namespace where the TidbCluster object is located;
+defaults to the same namespace as the TidbMonitor
+
+
+
+
+name
+
+string
+
+
+
+Name is the name of TidbCluster object
+
+
+
+
+TidbClusterSpec
+
+
+(Appears on:
+TidbCluster )
+
+
+
TidbClusterSpec describes the attributes that a user creates on a tidb cluster
+
+
+
+
+Field
+Description
+
+
+
+
+
+pd
+
+
+PDSpec
+
+
+
+
+PD cluster spec
+
+
+
+
+tidb
+
+
+TiDBSpec
+
+
+
+
+TiDB cluster spec
+
+
+
+
+tikv
+
+
+TiKVSpec
+
+
+
+
+TiKV cluster spec
+
+
+
+
+tiflash
+
+
+TiFlashSpec
+
+
+
+
+(Optional)
+TiFlash cluster spec
+
+
+
+
+pump
+
+
+PumpSpec
+
+
+
+
+(Optional)
+Pump cluster spec
+
+
+
+
+helper
+
+
+HelperSpec
+
+
+
+
+(Optional)
+Helper spec
+
+
+
+
+paused
+
+bool
+
+
+
+(Optional)
+Indicates that the tidb cluster is paused and will not be processed by
+the controller.
+
+
+
+
+version
+
+string
+
+
+
+(Optional)
+TODO: remove optional after defaulting logic introduced
+TiDB cluster version
+
+
+
+
+schedulerName
+
+string
+
+
+
+SchedulerName of TiDB cluster Pods
+
+
+
+
+pvReclaimPolicy
+
+
+Kubernetes core/v1.PersistentVolumeReclaimPolicy
+
+
+
+
+Persistent volume reclaim policy applied to the PVs that are consumed by the TiDB cluster
+
+
+
+
+imagePullPolicy
+
+
+Kubernetes core/v1.PullPolicy
+
+
+
+
+ImagePullPolicy of TiDB cluster Pods
+
+
+
+
+configUpdateStrategy
+
+
+ConfigUpdateStrategy
+
+
+
+
+ConfigUpdateStrategy determines how the configuration change is applied to the cluster.
+UpdateStrategyInPlace will update the ConfigMap of configuration in-place and an extra rolling-update of the
+cluster component is needed to reload the configuration change.
+UpdateStrategyRollingUpdate will create a new ConfigMap with the new configuration and rolling-update the
+related components to use the new ConfigMap, that is, the new configuration will be applied automatically.
+
+
+
+
+enablePVReclaim
+
+bool
+
+
+
+(Optional)
+Whether to enable PVC reclaim for orphan PVCs left by StatefulSet scale-in
+Optional: Defaults to false
+
+
+
+
+tlsCluster
+
+
+TLSCluster
+
+
+
+
+(Optional)
+Whether to enable TLS connections between TiDB server components
+Optional: Defaults to nil
+
+
+
+
+hostNetwork
+
+bool
+
+
+
+(Optional)
+Whether host network is enabled for TiDB cluster Pods
+Optional: Defaults to false
+
+
+
+
+affinity
+
+
+Kubernetes core/v1.Affinity
+
+
+
+
+(Optional)
+Affinity of TiDB cluster Pods
+
+
+
+
+priorityClassName
+
+string
+
+
+
+(Optional)
+PriorityClassName of TiDB cluster Pods
+Optional: Defaults to omitted
+
+
+
+
+nodeSelector
+
+map[string]string
+
+
+
+(Optional)
+Base node selectors of TiDB cluster Pods; components may add to or override these selectors respectively
+
+
+
+
+annotations
+
+map[string]string
+
+
+
+(Optional)
+Base annotations of TiDB cluster Pods; components may add to or override these annotations respectively
+
+
+
+
+tolerations
+
+
+[]Kubernetes core/v1.Toleration
+
+
+
+
+(Optional)
+Base tolerations of TiDB cluster Pods, components may add more tolerations upon this respectively
+
+
+
+
+timezone
+
+string
+
+
+
+(Optional)
+Time zone of TiDB cluster Pods
+Optional: Defaults to UTC
+
+
+
+
+services
+
+
+[]Service
+
+
+
+
+Services lists the non-headless service types used in the TidbCluster
+Deprecated
+
+
+
+
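+A hedged sketch of the cluster-level fields above (component specs omitted); the exact string accepted for configUpdateStrategy is an assumption and should be checked against the CRD schema.
+
+```yaml
+# Illustrative cluster-level settings only; pd/tikv/tidb sections are omitted.
+spec:
+  version: v3.0.8
+  timezone: UTC
+  pvReclaimPolicy: Retain               # reclaim policy applied to PVs consumed by the cluster
+  configUpdateStrategy: RollingUpdate   # assumed short form of UpdateStrategyRollingUpdate
+  enablePVReclaim: false
+  hostNetwork: false
+```
+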
+TidbClusterStatus
+
+
+(Appears on:
+TidbCluster )
+
+
+
TidbClusterStatus represents the current status of a tidb cluster.
+
+
+TidbInitializerSpec
+
+
+(Appears on:
+TidbInitializer )
+
+
+
TidbInitializerSpec encodes the desired state of the tidb initializer Job
+
+
+
+
+Field
+Description
+
+
+
+
+
+image
+
+string
+
+
+
+
+
+
+
+cluster
+
+
+TidbClusterRef
+
+
+
+
+
+
+
+
+imagePullPolicy
+
+
+Kubernetes core/v1.PullPolicy
+
+
+
+
+(Optional)
+
+
+
+
+permitHost
+
+string
+
+
+
+(Optional)
+permitHost is the only host that will be allowed to connect to TiDB.
+
+
+
+
+initSql
+
+string
+
+
+
+(Optional)
+InitSql is the SQL statements executed after the TiDB cluster is bootstrapped.
+
+
+
+
+initSqlConfigMap
+
+string
+
+
+
+(Optional)
+InitSqlConfigMapName references a ConfigMap that provides the init-sql; it takes higher precedence than initSql if set
+
+
+
+
+passwordSecret
+
+string
+
+
+
+(Optional)
+
+
+
+
+resources
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+
+
+(Optional)
+
+
+
+
+timezone
+
+string
+
+
+
+(Optional)
+Time zone of TiDB initializer Pods
+
+
+
+
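+As a hedged illustration of the fields above, a minimal TidbInitializer could look roughly like this; the image, names, and SQL are placeholders.
+
+```yaml
+# Illustrative sketch; image, names, and SQL are placeholders.
+apiVersion: pingcap.com/v1alpha1
+kind: TidbInitializer
+metadata:
+  name: demo-init
+spec:
+  image: tnir/mysqlclient
+  cluster:
+    name: demo                  # target TidbCluster
+  initSql: "create database hello;"
+  passwordSecret: tidb-secret   # secret holding the user passwords
+```
+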
+TidbInitializerStatus
+
+
+(Appears on:
+TidbInitializer )
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+JobStatus
+
+
+Kubernetes batch/v1.JobStatus
+
+
+
+
+
+(Members of JobStatus
are embedded into this type.)
+
+
+
+
+
+phase
+
+
+InitializePhase
+
+
+
+
+Phase is a user readable state inferred from the underlying Job status and TidbCluster status
+
+
+
+
+TidbMonitorRef
+
+
+(Appears on:
+TidbClusterAutoScalerSpec )
+
+
+
TidbMonitorRef is a reference to a TidbMonitor
+
+
+
+
+Field
+Description
+
+
+
+
+
+namespace
+
+string
+
+
+
+(Optional)
+Namespace is the namespace where the TidbMonitor object is located;
+defaults to the same namespace as the TidbClusterAutoScaler
+
+
+
+
+name
+
+string
+
+
+
+Name is the name of TidbMonitor object
+
+
+
+
+TidbMonitorSpec
+
+
+(Appears on:
+TidbMonitor )
+
+
+
TidbMonitorSpec encodes the desired state of the tidb monitoring component
+
+
+TidbMonitorStatus
+
+
+(Appears on:
+TidbMonitor )
+
+
+
TODO: sync status
+
+TikvAutoScalerSpec
+
+
+(Appears on:
+TidbClusterAutoScalerSpec )
+
+
+
TikvAutoScalerSpec describes the spec for tikv auto-scaling
+
+
+
+
+Field
+Description
+
+
+
+
+
+BasicAutoScalerSpec
+
+
+BasicAutoScalerSpec
+
+
+
+
+
+(Members of BasicAutoScalerSpec
are embedded into this type.)
+
+
+
+
+
+TikvAutoScalerStatus
+
+
+(Appears on:
+TidbClusterAutoSclaerStatus )
+
+
+
TikvAutoScalerStatus describes the auto-scaling status of tikv
+
+
+
+
+Field
+Description
+
+
+
+
+
+BasicAutoScalerStatus
+
+
+BasicAutoScalerStatus
+
+
+
+
+
+(Members of BasicAutoScalerStatus
are embedded into this type.)
+
+
+
+
+
+TxnLocalLatches
+
+
+(Appears on:
+TiDBConfig )
+
+
+
TxnLocalLatches is the TxnLocalLatches section of the config.
+
+
+
+
+Field
+Description
+
+
+
+
+
+enabled
+
+bool
+
+
+
+(Optional)
+
+
+
+
+capacity
+
+uint
+
+
+
+(Optional)
+
+
+
+
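+As a hedged sketch, these two fields could be set through the TidbCluster CR roughly as follows; the nesting under spec.tidb.config follows the TiDB configuration layout and is an assumption for illustration.
+
+```yaml
+# Hypothetical sketch: enabling transaction local latches via the TidbCluster CR.
+spec:
+  tidb:
+    config:
+      txn-local-latches:
+        enabled: true
+        capacity: 2048000
+```
+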
+UnjoinedMember
+
+
+(Appears on:
+PDStatus )
+
+
+
UnjoinedMember is the information of a PD member that has not joined the cluster
+
+
+
+
+Field
+Description
+
+
+
+
+
+podName
+
+string
+
+
+
+
+
+
+
+pvcUID
+
+k8s.io/apimachinery/pkg/types.UID
+
+
+
+
+
+
+
+createdAt
+
+
+Kubernetes meta/v1.Time
+
+
+
+
+
+
+
+
+User
+
+
+
User is the configuration of users.
+
+
+
+
+Field
+Description
+
+
+
+
+
+password
+
+string
+
+
+
+(Optional)
+
+
+
+
+profile
+
+string
+
+
+
+(Optional)
+
+
+
+
+quota
+
+string
+
+
+
+(Optional)
+
+
+
+
+networks
+
+
+Networks
+
+
+
+
+(Optional)
+
+
+
+
+
+
+Generated with gen-crd-api-reference-docs
+
diff --git a/docs/api-references/template/members.tpl b/docs/api-references/template/members.tpl
new file mode 100644
index 0000000000..9f08d1aa5b
--- /dev/null
+++ b/docs/api-references/template/members.tpl
@@ -0,0 +1,48 @@
+{{ define "members" }}
+
+{{ range .Members }}
+{{ if not (hiddenMember .)}}
+
+
+ {{ fieldName . }}
+
+ {{ if linkForType .Type }}
+
+ {{ typeDisplayName .Type }}
+
+ {{ else }}
+ {{ typeDisplayName .Type }}
+ {{ end }}
+
+
+
+ {{ if fieldEmbedded . }}
+
+ (Members of {{ fieldName . }}
are embedded into this type.)
+
+ {{ end}}
+
+ {{ if isOptionalMember .}}
+ (Optional)
+ {{ end }}
+
+ {{ safe (renderComments .CommentLines) }}
+
+ {{ if and (eq (.Type.Name.Name) "ObjectMeta") }}
+ Refer to the Kubernetes API documentation for the fields of the
+ metadata
field.
+ {{ end }}
+
+ {{ if or (eq (fieldName .) "spec") }}
+
+
+
+ {{ template "members" .Type }}
+
+ {{ end }}
+
+
+{{ end }}
+{{ end }}
+
+{{ end }}
diff --git a/docs/api-references/template/pkg.tpl b/docs/api-references/template/pkg.tpl
new file mode 100644
index 0000000000..4e7fe158c5
--- /dev/null
+++ b/docs/api-references/template/pkg.tpl
@@ -0,0 +1,55 @@
+{{ define "packages" }}
+
+{{ with .packages}}
+---
+title: TiDB Operator API Document
+summary: Reference of TiDB Operator API
+category: how-to
+---
+
+API Document
+Packages:
+
+{{ end}}
+
+{{ range .packages }}
+
+ {{- packageDisplayName . -}}
+
+
+ {{ with (index .GoPackages 0 )}}
+ {{ with .DocComments }}
+
+ {{ safe (renderComments .) }}
+
+ {{ end }}
+ {{ end }}
+
+ Resource Types:
+
+ {{- range (visibleTypes (sortedTypes .Types)) -}}
+ {{ if isExportedType . -}}
+
+ {{ typeDisplayName . }}
+
+ {{- end }}
+ {{- end -}}
+
+
+ {{ range (visibleTypes (sortedTypes .Types))}}
+ {{ template "type" . }}
+ {{ end }}
+
+{{ end }}
+
+
+ Generated with gen-crd-api-reference-docs
+
+
+{{ end }}
diff --git a/docs/api-references/template/type.tpl b/docs/api-references/template/type.tpl
new file mode 100644
index 0000000000..e28b088abc
--- /dev/null
+++ b/docs/api-references/template/type.tpl
@@ -0,0 +1,58 @@
+{{ define "type" }}
+
+
+ {{- .Name.Name }}
+ {{ if eq .Kind "Alias" }}({{.Underlying}}
alias){{ end -}}
+
+{{ with (typeReferences .) }}
+
+ (Appears on:
+ {{- $prev := "" -}}
+ {{- range . -}}
+ {{- if $prev -}}, {{ end -}}
+ {{ $prev = . }}
+ {{ typeDisplayName . }}
+ {{- end -}}
+ )
+
+{{ end }}
+
+
+
+ {{ safe (renderComments .CommentLines) }}
+
+
+{{ if .Members }}
+
+
+
+ Field
+ Description
+
+
+
+ {{ if isExportedType . }}
+
+
+ apiVersion
+ string
+
+
+ {{apiGroup .}}
+
+
+
+
+
+ kind
+ string
+
+ {{.Name.Name}}
+
+ {{ end }}
+ {{ template "members" .}}
+
+
+{{ end }}
+
+{{ end }}
diff --git a/docs/aws-eks-tutorial.md b/docs/aws-eks-tutorial.md
deleted file mode 100644
index fef7883cb4..0000000000
--- a/docs/aws-eks-tutorial.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Deploy TiDB, a distributed MySQL compatible database, on Kubernetes via AWS EKS
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/maintain/backup-and-restore/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/maintain/backup-and-restore/).
diff --git a/docs/backup-restore.md b/docs/backup-restore.md
deleted file mode 100644
index a45c907ec6..0000000000
--- a/docs/backup-restore.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Backup and Restore a TiDB Cluster
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/maintain/backup-and-restore/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/maintain/backup-and-restore/).
diff --git a/docs/cli-manual.md b/docs/cli-manual.md
deleted file mode 100644
index 5970fca5bb..0000000000
--- a/docs/cli-manual.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# The TiDB Kubernetes Control(tkctl) User Manual
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/reference/tools/tkctl/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/reference/tools/tkctl/).
diff --git a/docs/google-kubernetes-tutorial.md b/docs/google-kubernetes-tutorial.md
deleted file mode 100644
index d8ec0936fc..0000000000
--- a/docs/google-kubernetes-tutorial.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Deploy TiDB, a distributed MySQL compatible database, to Kubernetes on Google Cloud
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/get-started/deploy-tidb-from-kubernetes-gke/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/get-started/deploy-tidb-from-kubernetes-gke/).
-
diff --git a/docs/minikube-tutorial.md b/docs/minikube-tutorial.md
deleted file mode 100644
index ad83e13dbe..0000000000
--- a/docs/minikube-tutorial.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Deploy TiDB in the minikube cluster
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/get-started/deploy-tidb-from-kubernetes-minikube/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/get-started/deploy-tidb-from-kubernetes-minikube/).
diff --git a/docs/operation-guide.md b/docs/operation-guide.md
deleted file mode 100644
index 4e6c9ac682..0000000000
--- a/docs/operation-guide.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# TiDB Cluster Operation Guide
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/tidb-operator-overview/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/tidb-operator-overview/).
diff --git a/docs/references/tidb-backup-configuration.md b/docs/references/tidb-backup-configuration.md
deleted file mode 100644
index 29a582219b..0000000000
--- a/docs/references/tidb-backup-configuration.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# TiDB Backup Configuration Reference
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/reference/configuration/backup/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/reference/configuration/backup/).
diff --git a/docs/release-note-guide.md b/docs/release-note-guide.md
index 81abf45466..a00fb35021 100644
--- a/docs/release-note-guide.md
+++ b/docs/release-note-guide.md
@@ -6,6 +6,9 @@ When you write a release note for your pull request, make sure that your languag
- ACTION REQUIRED: Add the `timezone` support for [all charts]
+ Then, add the label `release-note-action-required` to the PR. This is required
+ by [the tool we use to generate the change log](generate-changelog.md).
+
2. Every note starts with the "do" form of a verb. For example:
- Support backup to S3 with [Backup & Restore (BR)](https://github.com/pingcap/br)
diff --git a/docs/setup.md b/docs/setup.md
deleted file mode 100644
index 447daaf457..0000000000
--- a/docs/setup.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# TiDB Operator Setup
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/deploy/tidb-operator/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/deploy/tidb-operator/).
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
deleted file mode 100644
index 795fa932f2..0000000000
--- a/docs/troubleshooting.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Troubleshooting
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/troubleshoot/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/troubleshoot/).
diff --git a/docs/user-guide.md b/docs/user-guide.md
deleted file mode 100644
index 1e4632e7b9..0000000000
--- a/docs/user-guide.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# TiDB Operator User Guide
-
-This document has been moved to [https://pingcap.com/docs/v3.0/tidb-in-kubernetes/tidb-operator-overview/](https://pingcap.com/docs/v3.0/tidb-in-kubernetes/tidb-operator-overview/).
diff --git a/examples/advanced-statefulset/tidb-cluster-scaled.yaml b/examples/advanced-statefulset/tidb-cluster-scaled.yaml
new file mode 100644
index 0000000000..c7b12fbb30
--- /dev/null
+++ b/examples/advanced-statefulset/tidb-cluster-scaled.yaml
@@ -0,0 +1,28 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+ annotations:
+ tikv.tidb.pingcap.com/delete-slots: '[1]'
+ name: asts
+spec:
+ version: v3.0.8
+ timezone: UTC
+ pvReclaimPolicy: Delete
+ pd:
+ baseImage: pingcap/pd
+ replicas: 3
+ requests:
+ storage: "1Gi"
+ config: {}
+ tikv:
+ baseImage: pingcap/tikv
+ replicas: 3
+ requests:
+ storage: "1Gi"
+ config: {}
+ tidb:
+ baseImage: pingcap/tidb
+ replicas: 2
+ service:
+ type: ClusterIP
+ config: {}
diff --git a/examples/advanced-statefulset/tidb-cluster.yaml b/examples/advanced-statefulset/tidb-cluster.yaml
new file mode 100644
index 0000000000..ea8aaa9755
--- /dev/null
+++ b/examples/advanced-statefulset/tidb-cluster.yaml
@@ -0,0 +1,26 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+ name: asts
+spec:
+ version: v3.0.8
+ timezone: UTC
+ pvReclaimPolicy: Delete
+ pd:
+ baseImage: pingcap/pd
+ replicas: 3
+ requests:
+ storage: "1Gi"
+ config: {}
+ tikv:
+ baseImage: pingcap/tikv
+ replicas: 4
+ requests:
+ storage: "1Gi"
+ config: {}
+ tidb:
+ baseImage: pingcap/tidb
+ replicas: 2
+ service:
+ type: ClusterIP
+ config: {}
diff --git a/examples/auto-scale/README.md b/examples/auto-scale/README.md
new file mode 100644
index 0000000000..5bb7277d31
--- /dev/null
+++ b/examples/auto-scale/README.md
@@ -0,0 +1,50 @@
+# Deploying TidbCluster with Auto-scaling
+
+> **Note:**
+>
+> This setup is for test or demo purposes only and **IS NOT** applicable for critical environments. Refer to the [Documents](https://pingcap.com/docs/stable/tidb-in-kubernetes/deploy/prerequisites/) for production setup.
+
+
+The following steps will create a TiDB cluster with monitoring and an auto-scaler; the monitoring data is not persisted by default.
+
+**Prerequisites**:
+- Has TiDB Operator `v1.1.0-beta.2` or a higher version installed. [Doc](https://pingcap.com/docs/stable/tidb-in-kubernetes/deploy/tidb-operator/)
+- Has a default `StorageClass` configured, and there are enough PVs (by default, 6 PVs are required) of that storageClass:
+
+ This could be verified by the following command:
+
+ ```bash
+ > kubectl get storageclass
+ ```
+
+ The output is similar to this:
+
+ ```bash
+ NAME PROVISIONER AGE
+ standard (default) kubernetes.io/gce-pd 1d
+ gold kubernetes.io/gce-pd 1d
+ ```
+
+ Alternatively, you could specify the storageClass explicitly by modifying `tidb-cluster.yaml`.
+
+
+## Enabling Auto-scaling
+
+> **Note:**
+>
+> The Auto-scaling feature is still in alpha. You should enable it in TiDB Operator by setting the following in `values.yaml`:
+ ```yaml
+features:
+  - AutoScaling=true
+```
+
+Auto-scale the cluster based on CPU load:
+```bash
+> kubectl -n <namespace> apply -f ./
+```
+
+## Destroy
+
+```bash
+> kubectl -n <namespace> delete -f ./
+```
diff --git a/examples/auto-scale/tidb-cluster-auto-scaler.yaml b/examples/auto-scale/tidb-cluster-auto-scaler.yaml
new file mode 100644
index 0000000000..7727b8e0f2
--- /dev/null
+++ b/examples/auto-scale/tidb-cluster-auto-scaler.yaml
@@ -0,0 +1,31 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbClusterAutoScaler
+metadata:
+ name: auto-scaling-demo
+spec:
+ cluster:
+ name: auto-scaling-demo
+ monitor:
+ name: auto-scaling-demo
+ tikv:
+ minReplicas: 3
+ maxReplicas: 4
+ metricsTimeDuration: "1m"
+ metrics:
+ - type: "Resource"
+ resource:
+ name: "cpu"
+ target:
+ type: "Utilization"
+ averageUtilization: 80
+ tidb:
+ minReplicas: 2
+ maxReplicas: 3
+ metricsTimeDuration: "1m"
+ metrics:
+ - type: "Resource"
+ resource:
+ name: "cpu"
+ target:
+ type: "Utilization"
+ averageUtilization: 80
diff --git a/examples/auto-scale/tidb-cluster.yaml b/examples/auto-scale/tidb-cluster.yaml
new file mode 100644
index 0000000000..9c3c94f86b
--- /dev/null
+++ b/examples/auto-scale/tidb-cluster.yaml
@@ -0,0 +1,29 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+ name: auto-scaling-demo
+spec:
+ version: v3.0.8
+ timezone: UTC
+ pvReclaimPolicy: Delete
+ pd:
+ baseImage: pingcap/pd
+ replicas: 3
+ requests:
+ storage: "1Gi"
+ config: {}
+ tikv:
+ baseImage: pingcap/tikv
+ replicas: 3
+ requests:
+ cpu: "1"
+ storage: "1Gi"
+ config: {}
+ tidb:
+ baseImage: pingcap/tidb
+ replicas: 2
+ service:
+ type: ClusterIP
+ config: {}
+ requests:
+ cpu: "1"
diff --git a/examples/auto-scale/tidb-monitor.yaml b/examples/auto-scale/tidb-monitor.yaml
new file mode 100644
index 0000000000..c1c99bc95d
--- /dev/null
+++ b/examples/auto-scale/tidb-monitor.yaml
@@ -0,0 +1,20 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbMonitor
+metadata:
+ name: auto-scaling-demo
+spec:
+ clusters:
+ - name: auto-scaling-demo
+ prometheus:
+ baseImage: prom/prometheus
+ version: v2.11.1
+ grafana:
+ baseImage: grafana/grafana
+ version: 6.0.1
+ initializer:
+ baseImage: pingcap/tidb-monitor-initializer
+ version: v3.0.5
+ reloader:
+ baseImage: pingcap/tidb-monitor-reloader
+ version: v1.0.1
+ imagePullPolicy: IfNotPresent
diff --git a/examples/basic/README.md b/examples/basic/README.md
new file mode 100644
index 0000000000..9933d7e7c9
--- /dev/null
+++ b/examples/basic/README.md
@@ -0,0 +1,73 @@
+# A Basic TiDB cluster with monitoring
+
+> **Note:**
+>
+> This setup is for test or demo purposes only and **IS NOT** applicable for critical environments. Refer to the [Documents](https://pingcap.com/docs/stable/tidb-in-kubernetes/deploy/prerequisites/) for production setup.
+
+The following steps will create a TiDB cluster with monitoring; the monitoring data is not persisted by default.
+
+**Prerequisites**:
+- Has TiDB Operator `v1.1.0-beta.1` or a higher version installed. [Doc](https://pingcap.com/docs/stable/tidb-in-kubernetes/deploy/tidb-operator/)
+- Has a default `StorageClass` configured, and there are enough PVs (by default, 6 PVs are required) of that storageClass:
+
+ This could be verified by the following command:
+
+ ```bash
+ > kubectl get storageclass
+ ```
+
+ The output is similar to this:
+
+ ```bash
+ NAME PROVISIONER AGE
+ standard (default) kubernetes.io/gce-pd 1d
+ gold kubernetes.io/gce-pd 1d
+ ```
+
+ Alternatively, you could specify the storageClass explicitly by modifying `tidb-cluster.yaml`.
+
+## Install
+
+The following commands are assumed to be executed in this directory.
+
+Install the cluster:
+
+```bash
+> kubectl -n <namespace> apply -f ./
+```
+
+Wait for the cluster Pods to be ready:
+
+```bash
+watch kubectl -n <namespace> get pod
+```
+
+## Explore
+
+Explore the TiDB SQL interface:
+
+```bash
+> kubectl -n <namespace> port-forward svc/basic-tidb 4000:4000 &>/tmp/pf-tidb.log &
+> mysql -h 127.0.0.1 -P 4000 -u root
+```
+
+Explore the monitoring dashboards:
+
+```bash
+> kubectl -n <namespace> port-forward svc/basic-grafana 3000:3000 &>/tmp/pf-grafana.log &
+```
+
+Browse [localhost:3000](http://localhost:3000).
+
+## Destroy
+
+```bash
+> kubectl -n <namespace> delete -f ./
+```
+
+The PVCs used by the TiDB cluster will not be deleted in the above process; therefore, the PVs will not be released either. You can delete the PVCs and release the PVs with the following command:
+
+```bash
+> kubectl -n <namespace> delete pvc -l app.kubernetes.io/instance=basic,app.kubernetes.io/managed-by=tidb-operator
+```
+
diff --git a/examples/basic/tidb-cluster.yaml b/examples/basic/tidb-cluster.yaml
new file mode 100644
index 0000000000..ae7279deb7
--- /dev/null
+++ b/examples/basic/tidb-cluster.yaml
@@ -0,0 +1,26 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+ name: basic
+spec:
+ version: v3.0.8
+ timezone: UTC
+ pvReclaimPolicy: Delete
+ pd:
+ baseImage: pingcap/pd
+ replicas: 3
+ requests:
+ storage: "1Gi"
+ config: {}
+ tikv:
+ baseImage: pingcap/tikv
+ replicas: 3
+ requests:
+ storage: "1Gi"
+ config: {}
+ tidb:
+ baseImage: pingcap/tidb
+ replicas: 2
+ service:
+ type: ClusterIP
+ config: {}
diff --git a/examples/basic/tidb-monitor.yaml b/examples/basic/tidb-monitor.yaml
new file mode 100644
index 0000000000..d314dc1f3e
--- /dev/null
+++ b/examples/basic/tidb-monitor.yaml
@@ -0,0 +1,20 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbMonitor
+metadata:
+ name: basic
+spec:
+ clusters:
+ - name: basic
+ prometheus:
+ baseImage: prom/prometheus
+ version: v2.11.1
+ grafana:
+ baseImage: grafana/grafana
+ version: 6.0.1
+ initializer:
+ baseImage: pingcap/tidb-monitor-initializer
+ version: v3.0.5
+ reloader:
+ baseImage: pingcap/tidb-monitor-reloader
+ version: v1.0.1
+ imagePullPolicy: IfNotPresent
diff --git a/examples/initialize/README.md b/examples/initialize/README.md
new file mode 100644
index 0000000000..6e4df651ad
--- /dev/null
+++ b/examples/initialize/README.md
@@ -0,0 +1,67 @@
+# Creating TidbCluster with Initialization
+
+> **Note:**
+>
+> This setup is for test or demo purposes only and **IS NOT** applicable for critical environments. Refer to the [Documents](https://pingcap.com/docs/stable/tidb-in-kubernetes/deploy/prerequisites/) for production setup.
+
+
+The following steps will create a TiDB cluster with Initialization.
+
+**Prerequisites**:
+- Has TiDB Operator `v1.1.0-beta.1` or a higher version installed. [Doc](https://pingcap.com/docs/stable/tidb-in-kubernetes/deploy/tidb-operator/)
+- Has a default `StorageClass` configured, and there are enough PVs (by default, 6 PVs are required) of that storageClass:
+
+ This could be verified by the following command:
+
+ ```bash
+ > kubectl get storageclass
+ ```
+
+ The output is similar to this:
+
+ ```bash
+ NAME PROVISIONER AGE
+ standard (default) kubernetes.io/gce-pd 1d
+ gold kubernetes.io/gce-pd 1d
+ ```
+
+ Alternatively, you could specify the storageClass explicitly by modifying `tidb-cluster.yaml`.
+
+
+## Initialize
+
+
+> **Note:**
+>
+> The initialization should be done once the TiDB cluster has been created
+
+The following commands are assumed to be executed in this directory.
+
+You can create the root user and set its password by creating a secret and linking it to the Initializer:
+
+```bash
+> kubectl create secret generic tidb-secret --from-literal=root=<root-password> --namespace=<namespace>
+```
+
+You can also create other users and set their password:
+```bash
+> kubectl create secret generic tidb-secret --from-literal=root=<root-password> --from-literal=developer=<developer-password> --namespace=<namespace>
+```
+
+Initialize the cluster to create the users and create the database named `hello`:
+
+```bash
+> kubectl -n <namespace> apply -f ./
+```
+
+Wait for the initialization job to complete:
+```bash
+$ kubectl get pod -n <namespace> | grep initialize-demo-tidb-initializer
+initialize-demo-tidb-initializer-whzn7 0/1 Completed 0 57s
+```
+
+## Destroy
+
+```bash
+> kubectl -n <namespace> delete -f ./
+```
diff --git a/examples/initialize/tidb-cluster.yaml b/examples/initialize/tidb-cluster.yaml
new file mode 100644
index 0000000000..1ec543ea72
--- /dev/null
+++ b/examples/initialize/tidb-cluster.yaml
@@ -0,0 +1,26 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+ name: initialize-demo
+spec:
+ version: v3.0.8
+ timezone: UTC
+ pvReclaimPolicy: Delete
+ pd:
+ baseImage: pingcap/pd
+ replicas: 1
+ requests:
+ storage: "1Gi"
+ config: {}
+ tikv:
+ baseImage: pingcap/tikv
+ replicas: 1
+ requests:
+ storage: "1Gi"
+ config: {}
+ tidb:
+ baseImage: pingcap/tidb
+ replicas: 1
+ service:
+ type: ClusterIP
+ config: {}
diff --git a/examples/initialize/tidb-initializer.yaml b/examples/initialize/tidb-initializer.yaml
new file mode 100644
index 0000000000..9067aff97b
--- /dev/null
+++ b/examples/initialize/tidb-initializer.yaml
@@ -0,0 +1,21 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbInitializer
+metadata:
+ name: initialize-demo
+spec:
+ image: tnir/mysqlclient
+ imagePullPolicy: IfNotPresent
+ cluster:
+ name: initialize-demo
+ initSql: "create database hello;"
+ # initSqlConfigMap: tidb-initsql
+ passwordSecret: "tidb-secret"
+ # permitHost: 172.6.5.8
+ # resources:
+ # limits:
+ # cpu: 1000m
+ # memory: 500Mi
+ # requests:
+ # cpu: 100m
+ # memory: 50Mi
+ # timezone: "Asia/Shanghai"
diff --git a/examples/selfsigned-tls/selfsigned-ca.yaml b/examples/selfsigned-tls/selfsigned-ca.yaml
new file mode 100644
index 0000000000..806a78be55
--- /dev/null
+++ b/examples/selfsigned-tls/selfsigned-ca.yaml
@@ -0,0 +1,11 @@
+apiVersion: cert-manager.io/v1alpha2
+kind: Certificate
+metadata:
+ name: selfsigned-ca-cert
+spec:
+ secretName: selfsigned-ca-cert
+ commonName: "certmanager"
+ isCA: true
+ issuerRef:
+ name: selfsigned-issuer
+ kind: Issuer
diff --git a/examples/selfsigned-tls/selfsigned-cert-issuer.yaml b/examples/selfsigned-tls/selfsigned-cert-issuer.yaml
new file mode 100644
index 0000000000..934b53124d
--- /dev/null
+++ b/examples/selfsigned-tls/selfsigned-cert-issuer.yaml
@@ -0,0 +1,7 @@
+apiVersion: cert-manager.io/v1alpha2
+kind: Issuer
+metadata:
+ name: selfsigned-cert-issuer
+spec:
+ ca:
+ secretName: selfsigned-ca-cert
diff --git a/examples/selfsigned-tls/selfsigned-issuer.yaml b/examples/selfsigned-tls/selfsigned-issuer.yaml
new file mode 100644
index 0000000000..7f06abf08a
--- /dev/null
+++ b/examples/selfsigned-tls/selfsigned-issuer.yaml
@@ -0,0 +1,6 @@
+apiVersion: cert-manager.io/v1alpha2
+kind: Issuer
+metadata:
+ name: selfsigned-issuer
+spec:
+ selfSigned: {}
diff --git a/examples/selfsigned-tls/tidb-client-cert.yaml b/examples/selfsigned-tls/tidb-client-cert.yaml
new file mode 100644
index 0000000000..df740c27ed
--- /dev/null
+++ b/examples/selfsigned-tls/tidb-client-cert.yaml
@@ -0,0 +1,21 @@
+apiVersion: cert-manager.io/v1alpha2
+kind: Certificate
+metadata:
+ name: tidb-client-cert
+spec:
+ secretName: tls-tidb-client-secret # <clusterName>-tidb-client-secret
+ subject:
+ organizationalUnits:
+ - "TiDB Operator"
+ organization:
+ - "PingCAP"
+ duration: "8760h" # 364 days
+ # If you want to verify the server cert Common Name (e.g. the --ssl-verify-server-cert
+ # flag in the MySQL CLI), you must configure the hostname you use to connect to the
+ # server here.
+ commonName: "tls-tidb-client"
+ usages:
+ - "client auth"
+ issuerRef:
+ name: selfsigned-cert-issuer
+ kind: Issuer
diff --git a/examples/selfsigned-tls/tidb-cluster.yaml b/examples/selfsigned-tls/tidb-cluster.yaml
new file mode 100644
index 0000000000..aa93ea2274
--- /dev/null
+++ b/examples/selfsigned-tls/tidb-cluster.yaml
@@ -0,0 +1,28 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbCluster
+metadata:
+ name: tls
+spec:
+ version: v3.0.8
+ timezone: UTC
+ pvReclaimPolicy: Delete
+ pd:
+ baseImage: pingcap/pd
+ replicas: 1
+ requests:
+ storage: "1Gi"
+ config: {}
+ tikv:
+ baseImage: pingcap/tikv
+ replicas: 1
+ requests:
+ storage: "1Gi"
+ config: {}
+ tidb:
+ baseImage: pingcap/tidb
+ replicas: 1
+ service:
+ type: ClusterIP
+ config: {}
+ tlsClient:
+ enabled: true
diff --git a/examples/selfsigned-tls/tidb-server-cert.yaml b/examples/selfsigned-tls/tidb-server-cert.yaml
new file mode 100644
index 0000000000..6580dc5091
--- /dev/null
+++ b/examples/selfsigned-tls/tidb-server-cert.yaml
@@ -0,0 +1,22 @@
+apiVersion: cert-manager.io/v1alpha2
+kind: Certificate
+metadata:
+ name: tidb-server-cert
+spec:
+ secretName: tls-tidb-server-secret # <clusterName>-tidb-server-secret
+ subject:
+ organizationalUnits:
+ - "TiDB Operator"
+ organization:
+ - "PingCAP"
+ duration: "8760h" # 364 days
+ # If you want to verify the server cert Common Name (e.g. the --ssl-verify-server-cert
+ # flag in the MySQL CLI), you must configure the hostname you use to connect to the
+ # server here.
+ commonName: "tls-tidb-server"
+ usages:
+ - "client auth"
+ - "server auth"
+ issuerRef:
+ name: selfsigned-cert-issuer
+ kind: Issuer
diff --git a/go.mod b/go.mod
index e1cfcae76e..0968f9aa4b 100644
--- a/go.mod
+++ b/go.mod
@@ -7,8 +7,10 @@ module github.com/pingcap/tidb-operator
go 1.13
require (
+ github.com/Azure/go-autorest/autorest/mocks v0.3.0 // indirect
github.com/BurntSushi/toml v0.3.1
github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e // indirect
+ github.com/Masterminds/semver v1.4.2
github.com/Microsoft/go-winio v0.4.12 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2
@@ -35,6 +37,7 @@ require (
github.com/gophercloud/gophercloud v0.3.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
+ github.com/grpc-ecosystem/grpc-gateway v1.13.0 // indirect
github.com/imdario/mergo v0.3.7 // indirect
github.com/juju/errors v0.0.0-20180806074554-22422dad46e1
github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect
@@ -47,7 +50,7 @@ require (
github.com/openshift/generic-admission-server v1.14.0
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/pierrec/lz4 v2.0.5+incompatible // indirect
- github.com/pingcap/advanced-statefulset v0.3.1
+ github.com/pingcap/advanced-statefulset v0.3.2
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 // indirect
github.com/pingcap/errors v0.11.0
github.com/pingcap/kvproto v0.0.0-20191217072959-393e6c0fd4b7
diff --git a/go.sum b/go.sum
index 8e1ada0a58..383bfe3b6d 100644
--- a/go.sum
+++ b/go.sum
@@ -32,6 +32,8 @@ github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjW
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
@@ -43,11 +45,13 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
+github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534 h1:N7lSsF+R7wSulUADi36SInSQA3RvfO/XclHQfedr0qk=
github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e h1:eb0Pzkt15Bm7f2FFYv7sjY7NPFi3cPkS3tv1CcrFBWA=
github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
@@ -67,6 +71,7 @@ github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkF
github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2 h1:CDDf61yprxfS7bmBPyhH8pxaobD2VbO3d7laAxJbZos=
github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2/go.mod h1:X0noFIik9YqfhGYBLEHg8LJKEwy7QIitLQuFMpKLcPk=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
@@ -458,6 +463,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.2 h1:S+ef0492XaIknb8LMjcwgW2i3cNTzDYMmDrOThOJNWc=
github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.13.0 h1:sBDQoHXrOlfPobnKw69FIKa1wg9qsLLvvQ/Y19WtFgI=
+github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -644,8 +651,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pingcap/advanced-statefulset v0.3.1 h1:LxfAdpY2MV/b0MUlASYWjcPfUR161Xly1rA7oaIi684=
-github.com/pingcap/advanced-statefulset v0.3.1/go.mod h1:rg2p1v6AGsKhvEZi6Sm0YNYJCmdXdZZhQ6Sviei7Ivs=
+github.com/pingcap/advanced-statefulset v0.3.2 h1:cdnmWNaldoAyAWL/614Nr3hydnAzJEhSDMdIB6votZU=
+github.com/pingcap/advanced-statefulset v0.3.2/go.mod h1:rg2p1v6AGsKhvEZi6Sm0YNYJCmdXdZZhQ6Sviei7Ivs=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
github.com/pingcap/errors v0.11.0 h1:DCJQB8jrHbQ1VVlMFIrbj2ApScNNotVmkSNplu2yUt4=
@@ -679,6 +686,7 @@ github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uY
github.com/robfig/cron v1.1.0 h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY=
github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.0.1-alpha.3/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -862,6 +870,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0 h1:2mqDk8w/o6UmeUCu5Qiq2y7iMf6anbx+YA8d1JFoFrs=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -984,6 +994,8 @@ google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 h1:9VBRTdmgQxbs6HE0sUnMrSWNePppAJU07NYvX5dIB04=
google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c h1:hrpEMCZ2O7DR5gC1n2AJGVhrwiEjOi35+jxtIuZpTMo=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -991,6 +1003,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1001,6 +1015,7 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0=
gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
@@ -1018,11 +1033,13 @@ gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.1 h1:XM28wIgFzaBmeZ5dNHIpWLQpt/9DGKxk+rCg/22nnYE=
gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
@@ -1083,6 +1100,7 @@ k8s.io/kubectl v0.0.0-20190918164019-21692a0861df/go.mod h1:AjffgL1ZYSrbpRJHER9v
k8s.io/kubelet v0.0.0-20190918162654-250a1838aa2c/go.mod h1:LGhpyzd/3AkWcFcQJ3yO1UxMnJ6urMkCYfCp4iVxhjs=
k8s.io/kubernetes v1.16.0 h1:WPaqle2JWogVzLxhN6IK67u62IHKKrtYF7MS4FVR4/E=
k8s.io/kubernetes v1.16.0/go.mod h1:nlP2zevWKRGKuaaVbKIwozU0Rjg9leVDXkL4YTtjmVs=
+k8s.io/legacy-cloud-providers v0.0.0-20190918163543-cfa506e53441 h1:JkEasocl8SM6+H65kaEUjtLAOFYzwaQOVTDdy5DLOXk=
k8s.io/legacy-cloud-providers v0.0.0-20190918163543-cfa506e53441/go.mod h1:Phw/j+7dcoTPXRkv9Nyi3RJuA6SVSoHlc7M5K1pHizM=
k8s.io/metrics v0.0.0-20190918162108-227c654b2546/go.mod h1:XUFuIsGbIqaUga6Ivs02cCzxNjY4RPRvYnW0KhmnpQY=
k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3/go.mod h1:+G1xBfZDfVFsm1Tj/HNCvg4QqWx8rJ2Fxpqr1rqp/gQ=
diff --git a/hack/check-EOF.sh b/hack/check-EOF.sh
index 20417e109d..09336ee18a 100755
--- a/hack/check-EOF.sh
+++ b/hack/check-EOF.sh
@@ -33,6 +33,7 @@ FILELIST=($(find . -type f -not \( -path './vendor/*' \
-o -path './.idea/*' \
-o -path './.DS_Store' \
-o -path './*/.DS_Store' \
+ -o -path './data' \
\)))
NUM=0
diff --git a/hack/create-cert.sh b/hack/create-cert.sh
new file mode 100755
index 0000000000..b0dabab060
--- /dev/null
+++ b/hack/create-cert.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+usage() {
+cat <<EOF >> ${tmpdir}/csr.conf
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = ${service}
+DNS.2 = ${service}.${namespace}
+DNS.3 = ${service}.${namespace}.svc
+DNS.4 = *.${service}
+DNS.5 = *.${service}.${namespace}
+DNS.6 = *.${service}.${namespace}.svc
+IP.1 = 127.0.0.1
+EOF
+
+openssl genrsa -out ${tmpdir}/server-key.pem 2048
+openssl req -new -key ${tmpdir}/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf
+
+ # clean-up any previously created CSR for our service. Ignore errors if not present.
+kubectl delete csr ${csrName} 2>/dev/null || true
+
+ # create server cert/key CSR and send to k8s API
+cat <&2
+ exit 1
+fi
+
+ echo ${serverCert} | openssl base64 -d -A -out ${tmpdir}/server-cert.pem
+
+ # create the secret with CA cert and server cert/key
+kubectl create secret tls ${secret} \
+ --key=${tmpdir}/server-key.pem \
+ --cert=${tmpdir}/server-cert.pem \
+ --dry-run -o yaml |
+ kubectl -n ${namespace} apply -f -
diff --git a/hack/e2e-examples.sh b/hack/e2e-examples.sh
new file mode 100755
index 0000000000..895d93ce66
--- /dev/null
+++ b/hack/e2e-examples.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# E2E entrypoint script for examples.
+#
+
+ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
+cd $ROOT
+
+source "${ROOT}/hack/lib.sh"
+
+hack::ensure_kind
+
+echo "info: create a Kubernetes cluster"
+$KIND_BIN create cluster
+
+echo "info: start tidb-operator"
+hack/local-up-operator.sh
+
+echo "info: testing examples"
+export PATH=$PATH:$OUTPUT_BIN
+hack::ensure_kubectl
+
+cnt=0
+for t in $(find tests/examples/ -regextype sed -regex '.*/[0-9]\{3\}-.*\.sh'); do
+ echo "info: testing $t"
+ $t
+ if [ $? -eq 0 ]; then
+ echo "info: test $t passed"
+ else
+ echo "error: test $t failed"
+ ((cnt++))
+ fi
+done
+if [ $cnt -gt 0 ]; then
+ echo "fatal: $cnt tests failed"
+ exit 1
+fi
diff --git a/hack/e2e-openshift.sh b/hack/e2e-openshift.sh
new file mode 100755
index 0000000000..7e5e819d31
--- /dev/null
+++ b/hack/e2e-openshift.sh
@@ -0,0 +1,131 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# E2E entrypoint script for OpenShift.
+#
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
+cd $ROOT
+
+PULL_SECRET_FILE=${PULL_SECRET_FILE:-}
+
+if [ ! -e "$PULL_SECRET_FILE" ]; then
+ echo "error: pull secret file '$PULL_SECRET_FILE' does not exist"
+ exit 1
+fi
+
+vmx_cnt=$(grep -cw vmx /proc/cpuinfo || true)
+if [ "$vmx_cnt" -gt 0 ]; then
+ echo "info: nested virtualization enabled (vmx cnt: $vmx_cnt)"
+else
+ echo "error: nested virtualization not enabled, please refer to https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances"
+ exit 1
+fi
+
+echo "info: install required software packages"
+sudo yum install -y jq git make golang
+sudo yum install -y yum-utils
+sudo yum-config-manager \
+ --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+sudo yum install -y --nobest docker-ce docker-ce-cli containerd.io
+if ! systemctl is-active --quiet docker; then
+ sudo systemctl start docker
+fi
+echo "info: printing docker information"
+sudo docker info
+sudo chmod o+rw /var/run/docker.sock
+
+CRC_HOME=$HOME/.crc
+echo "info: mouting disk onto $CRC_HOME"
+if ! mountpoint $CRC_HOME &>/dev/null; then
+ sudo mkfs.ext4 -F /dev/disk/by-id/google-local-ssd-0
+ sudo rm -rf $CRC_HOME
+ mkdir $CRC_HOME
+ sudo mount /dev/disk/by-id/google-local-ssd-0 $CRC_HOME
+ sudo chown -R $(id -u):$(id -g) $CRC_HOME
+fi
+
+echo "info: downloading latest crc"
+cd $HOME
+CRC_VERSION=$(curl --retry 10 -L -s 'https://mirror.openshift.com/pub/openshift-v4/clients/crc/latest/release-info.json' | jq -r '.version.crcVersion')
+if ! test -e crc-linux-amd64.tar.xz; then
+ curl --retry 10 -LO https://mirror.openshift.com/pub/openshift-v4/clients/crc/$CRC_VERSION/crc-linux-amd64.tar.xz
+ tar -xvf crc-linux-amd64.tar.xz
+fi
+export PATH=$HOME/crc-linux-$CRC_VERSION-amd64:$PATH
+
+crc version
+
+crcStatus=$(crc status 2>/dev/null | awk '/CRC VM:/ {print $3}') || true
+if [[ "$crcStatus" == "Running" ]]; then
+ echo "info: OpenShift cluster is running"
+ crc status
+else
+ echo "info: starting OpenShift clsuter"
+ crc setup
+ crc config set cpus 6
+ crc config set memory 24576
+ crc start --pull-secret-file $PULL_SECRET_FILE
+fi
+
+echo "info: login"
+eval $(crc oc-env)
+KUBEADMIN_PASSWORD=$(cat $HOME/.crc/cache/crc_libvirt_*/kubeadmin-password)
+oc login -u kubeadmin -p "$KUBEADMIN_PASSWORD" https://api.crc.testing:6443 --insecure-skip-tls-verify
+
+echo "info: building images"
+cd $HOME/tidb-operator
+./hack/run-in-container.sh bash -c 'make docker e2e-docker
+images=(
+ tidb-operator:latest
+ tidb-backup-manager:latest
+ tidb-operator-e2e:latest
+)
+for image in ${images[@]}; do
+ docker save localhost:5000/pingcap/$image -o $image.tar.gz
+done
+'
+
+echo "info: pusing images"
+OC_PROJECT=openshift
+oc extract secret/router-ca --keys=tls.crt -n openshift-ingress-operator
+sudo mkdir /etc/docker/certs.d/default-route-openshift-image-registry.apps-crc.testing/ -p
+sudo mv tls.crt /etc/docker/certs.d/default-route-openshift-image-registry.apps-crc.testing/
+docker login -u kubeadmin --password-stdin default-route-openshift-image-registry.apps-crc.testing <<< "$(oc whoami -t)"
+
+images=(
+ tidb-operator:latest
+ tidb-backup-manager:latest
+ tidb-operator-e2e:latest
+)
+for image in ${images[@]}; do
+ sudo chown -R $(id -u):$(id -g) $image.tar.gz
+ docker load -i $image.tar.gz
+ docker tag localhost:5000/pingcap/$image image-registry.openshift-image-registry.svc:5000/$OC_PROJECT/$image
+ docker tag localhost:5000/pingcap/$image default-route-openshift-image-registry.apps-crc.testing/$OC_PROJECT/$image
+ docker push default-route-openshift-image-registry.apps-crc.testing/$OC_PROJECT/$image
+done
+
+export PROVIDER=openshift
+export TIDB_OPERATOR_IMAGE=image-registry.openshift-image-registry.svc:5000/$OC_PROJECT/tidb-operator:latest
+export TIDB_BACKUP_MANAGER_IMAGE=image-registry.openshift-image-registry.svc:5000/$OC_PROJECT/tidb-backup-manager:latest
+export E2E_IMAGE=image-registry.openshift-image-registry.svc:5000/$OC_PROJECT/tidb-operator-e2e:latest
+# The 'Restarter' test starts 1 replica of PD and TiKV and can pass in a single-node OpenShift cluster.
+./hack/run-e2e.sh --ginkgo.focus 'Restarter'
diff --git a/hack/e2e.sh b/hack/e2e.sh
index df5ace923b..3b1442d9b0 100755
--- a/hack/e2e.sh
+++ b/hack/e2e.sh
@@ -46,22 +46,39 @@ Usage: hack/e2e.sh [-h] -- [extra test args]
Environments:
- DOCKER_REGISTRY image docker registry
- IMAGE_TAG image tag
- SKIP_BUILD skip building binaries
- SKIP_IMAGE_BUILD skip build and push images
- SKIP_UP skip starting the cluster
- SKIP_DOWN skip shutting down the cluster
- REUSE_CLUSTER reuse existing cluster if found
- KUBE_VERSION the version of Kubernetes to test against
- KUBE_WORKERS the number of worker nodes (excludes master nodes), defaults: 3
- DOCKER_IO_MIRROR configure mirror for docker.io
- GCR_IO_MIRROR configure mirror for gcr.io
- QUAY_IO_MIRROR configure mirror for quay.io
- KIND_DATA_HOSTPATH (for kind) the host path of data directory for kind cluster, defaults: none
- GINKGO_NODES ginkgo nodes to run specs, defaults: 1
- GINKGO_PARALLEL if set to `y`, will run specs in parallel, the number of nodes will be the number of cpus
- GINKGO_NO_COLOR if set to `y`, suppress color output in default reporter
+ PROVIDER Kubernetes provider, e.g. kind, gke, eks, defaults: kind
+ DOCKER_REPO docker image repo
+ IMAGE_TAG image tag
+ CLUSTER the name of e2e cluster, defaults: tidb-operator
+ KUBECONFIG path to the kubeconfig file, defaults: ~/.kube/config
+ SKIP_BUILD skip building binaries
+ SKIP_IMAGE_BUILD skip build and push images
+ SKIP_IMAGE_LOAD skip load images
+ SKIP_UP skip starting the cluster
+ SKIP_DOWN skip shutting down the cluster
+ SKIP_TEST skip running the test
+ KUBE_VERSION the version of Kubernetes to test against
+ KUBE_WORKERS the number of worker nodes (excludes master nodes), defaults: 3
+ DOCKER_IO_MIRROR configure mirror for docker.io
+ GCR_IO_MIRROR configure mirror for gcr.io
+ QUAY_IO_MIRROR configure mirror for quay.io
+ KIND_DATA_HOSTPATH (kind only) the host path of data directory for kind cluster, defaults: none
+ GCP_PROJECT (gke only) the GCP project to run in
+ GCP_CREDENTIALS (gke only) the GCP service account to use
+ GCP_REGION (gke only) the GCP region, if specified a regional cluster is created
+ GCP_ZONE (gke only) the GCP zone, if specified a zonal cluster is created
+ GCP_SSH_PRIVATE_KEY (gke only) the path to the private ssh key
+ GCP_SSH_PUBLIC_KEY (gke only) the path to the public ssh key
+ GCP_MACHINE_TYPE (gke only) the machine type of instance, defaults: n1-standard-4
+ AWS_ACCESS_KEY_ID (eks only) the aws access key id
+ AWS_SECRET_ACCESS_KEY (eks only) the aws secret access key
+ AWS_REGION (eks only) the aws region
+ AWS_ZONE (eks only) the aws zone
+ GINKGO_NODES ginkgo nodes to run specs, defaults: 1
+ GINKGO_PARALLEL if set to `y`, will run specs in parallel, the number of nodes will be the number of cpus
+ GINKGO_NO_COLOR if set to `y`, suppress color output in default reporter
+ RUNNER_SUITE_NAME the suite name of runner
+ SKIP_GINKGO if set to `y`, skip ginkgo
Examples:
@@ -83,12 +100,60 @@ Examples:
3) reuse the cluster and don't tear down it after the testing
- REUSE_CLUSTER=y SKIP_DOWN=y ./hack/e2e.sh --
+ # for the first time, skip the down phase
+ SKIP_DOWN=y ./hack/e2e.sh --
+ # then skip both the up/down phase in subsequent tests
+ SKIP_UP=y SKIP_DOWN=y ./hack/e2e.sh --
4) use registry mirrors
DOCKER_IO_MIRROR=https://dockerhub.azk8s.cn QUAY_IO_MIRROR=https://quay.azk8s.cn GCR_IO_MIRROR=https://gcr.azk8s.cn ./hack/e2e.sh --
+5) run e2e with gke provider locally
+
+ You need to prepare a GCP service account with the following permissions:
+
+ - Compute Network Admin
+ - Kubernetes Engine Admin
+ - Service Account User
+ - Storage Admin
+ - Compute Instance Admin (v1)
+
+ You can create an ssh keypair with ssh-keygen at ~/.ssh/google_compute_engine
+ or specify an existing ssh keypair with the following environments:
+
+ export GCP_SSH_PRIVATE_KEY=
+ export GCP_SSH_PUBLIC_KEY=
+
+ Then run with the following additional GCP-specific environments:
+
+ export GCP_PROJECT=
+ export GCP_CREDENTIALS=
+ export GCP_ZONE=us-central1-b
+
+ PROVIDER=gke ./hack/e2e.sh --
+
+ If you run this outside of the dev container started by
+ ./hack/run-in-container.sh, the Google Cloud SDK must be installed on your
+ machine.
+
+6) run e2e with eks provider locally
+
+ You need to configure your AWS credentials and region, or set them via the
+ following environments:
+
+ export AWS_ACCESS_KEY_ID=
+ export AWS_SECRET_ACCESS_KEY=
+ export AWS_REGION=
+
+ Then run e2e with the eks provider:
+
+ PROVIDER=eks ./hack/e2e.sh --
+
+ If you run this outside of the dev container started by
+ ./hack/run-in-container.sh, the AWS CLI must be installed on your
+ machine.
+
EOF
}
@@ -106,38 +171,59 @@ if [ "${1:-}" == "--" ]; then
shift
fi
-hack::ensure_kind
-hack::ensure_kubectl
-hack::ensure_helm
-
-DOCKER_REGISTRY=${DOCKER_REGISTRY:-localhost:5000}
+PROVIDER=${PROVIDER:-kind}
+DOCKER_REPO=${DOCKER_REPO:-localhost:5000/pingcap}
IMAGE_TAG=${IMAGE_TAG:-latest}
CLUSTER=${CLUSTER:-tidb-operator}
KUBECONFIG=${KUBECONFIG:-~/.kube/config}
-KUBECONTEXT=kind-$CLUSTER
SKIP_BUILD=${SKIP_BUILD:-}
SKIP_IMAGE_BUILD=${SKIP_IMAGE_BUILD:-}
+SKIP_IMAGE_LOAD=${SKIP_IMAGE_LOAD:-}
SKIP_UP=${SKIP_UP:-}
SKIP_DOWN=${SKIP_DOWN:-}
+SKIP_TEST=${SKIP_TEST:-}
REUSE_CLUSTER=${REUSE_CLUSTER:-}
KIND_DATA_HOSTPATH=${KIND_DATA_HOSTPATH:-none}
+GCP_PROJECT=${GCP_PROJECT:-}
+GCP_CREDENTIALS=${GCP_CREDENTIALS:-}
+GCP_REGION=${GCP_REGION:-}
+GCP_ZONE=${GCP_ZONE:-}
+GCP_SSH_PRIVATE_KEY=${GCP_SSH_PRIVATE_KEY:-}
+GCP_SSH_PUBLIC_KEY=${GCP_SSH_PUBLIC_KEY:-}
+GCP_MACHINE_TYPE=${GCP_MACHINE_TYPE:-n1-standard-4}
+AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-}
+AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-}
+AWS_REGION=${AWS_REGION:-}
+AWS_ZONE=${AWS_ZONE:-}
KUBE_VERSION=${KUBE_VERSION:-v1.12.10}
KUBE_WORKERS=${KUBE_WORKERS:-3}
DOCKER_IO_MIRROR=${DOCKER_IO_MIRROR:-}
GCR_IO_MIRROR=${GCR_IO_MIRROR:-}
QUAY_IO_MIRROR=${QUAY_IO_MIRROR:-}
+SKIP_GINKGO=${SKIP_GINKGO:-}
+RUNNER_SUITE_NAME=${RUNNER_SUITE_NAME:-}
-echo "DOCKER_REGISTRY: $DOCKER_REGISTRY"
+echo "PROVIDER: $PROVIDER"
+echo "DOCKER_REPO: $DOCKER_REPO"
echo "IMAGE_TAG: $IMAGE_TAG"
echo "CLUSTER: $CLUSTER"
echo "KUBECONFIG: $KUBECONFIG"
-echo "KUBECONTEXT: $KUBECONTEXT"
echo "SKIP_BUILD: $SKIP_BUILD"
echo "SKIP_IMAGE_BUILD: $SKIP_IMAGE_BUILD"
echo "SKIP_UP: $SKIP_UP"
echo "SKIP_DOWN: $SKIP_DOWN"
echo "KIND_DATA_HOSTPATH: $KIND_DATA_HOSTPATH"
+echo "GCP_PROJECT: $GCP_PROJECT"
+echo "GCP_CREDENTIALS: $GCP_CREDENTIALS"
+echo "GCP_REGION: $GCP_REGION"
+echo "GCP_ZONE: $GCP_ZONE"
+# We shouldn't print aws credential environments.
+# echo "AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID"
+# echo "AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY"
+echo "AWS_REGION: $AWS_REGION"
+echo "AWS_ZONE: $AWS_ZONE"
echo "KUBE_VERSION: $KUBE_VERSION"
+echo "KUBE_WORKERS: $KUBE_WORKERS"
echo "DOCKER_IO_MIRROR: $DOCKER_IO_MIRROR"
echo "GCR_IO_MIRROR: $GCR_IO_MIRROR"
echo "QUAY_IO_MIRROR: $QUAY_IO_MIRROR"
@@ -150,7 +236,8 @@ kind_node_images["v1.13.12"]="kindest/node:v1.13.12@sha256:5e8ae1a4e39f3d151d420
kind_node_images["v1.14.10"]="kindest/node:v1.14.10@sha256:81ae5a3237c779efc4dda43cc81c696f88a194abcc4f8fa34f86cf674aa14977"
kind_node_images["v1.15.7"]="kindest/node:v1.15.7@sha256:e2df133f80ef633c53c0200114fce2ed5e1f6947477dbc83261a6a921169488d"
kind_node_images["v1.16.4"]="kindest/node:v1.16.4@sha256:b91a2c2317a000f3a783489dfb755064177dbc3a0b2f4147d50f04825d016f55"
-kind_node_images["v1.17.0"]="kindest/node:v1.17.0@sha256:9512edae126da271b66b990b6fff768fbb7cd786c7d39e86bdf55906352fdf62"
+kind_node_images["v1.17.2"]="kindest/node:v1.17.2@sha256:59df31fc61d1da5f46e8a61ef612fa53d3f9140f82419d1ef1a6b9656c6b737c"
+kind_node_images["v1.18.0"]="kindest/node:v1.18.0@sha256:0e20578828edd939d25eb98496a685c76c98d54084932f76069f886ec315d694"
function e2e::image_build() {
if [ -n "$SKIP_BUILD" ]; then
@@ -161,23 +248,8 @@ function e2e::image_build() {
echo "info: skip building and pushing images"
return
fi
- DOCKER_REGISTRY=$DOCKER_REGISTRY IMAGE_TAG=$IMAGE_TAG make docker
- DOCKER_REGISTRY=$DOCKER_REGISTRY IMAGE_TAG=$IMAGE_TAG make e2e-docker
-}
-
-function e2e::image_load() {
- local names=(
- pingcap/tidb-operator
- pingcap/tidb-operator-e2e
- )
- for n in ${names[@]}; do
- $KIND_BIN load docker-image --name $CLUSTER $DOCKER_REGISTRY/$n:$IMAGE_TAG
- done
-}
-
-function e2e::cluster_exists() {
- local name="$1"
- $KIND_BIN get clusters | grep $CLUSTER &>/dev/null
+ DOCKER_REPO=$DOCKER_REPO IMAGE_TAG=$IMAGE_TAG make docker
+ DOCKER_REPO=$DOCKER_REPO IMAGE_TAG=$IMAGE_TAG make e2e-docker
}
function e2e::__restart_docker() {
@@ -201,14 +273,6 @@ function e2e::__restart_docker() {
echo "info: done restarting docker"
}
-# e2e::__cluster_is_alive checks if the cluster is alive or not
-function e2e::__cluster_is_alive() {
- local ret=0
- echo "info: checking the cluster version"
- $KUBECTL_BIN --context $KUBECONTEXT version --short || ret=$?
- return $ret
-}
-
function e2e::__configure_docker_mirror_for_dind() {
echo "info: configure docker.io mirror '$DOCKER_IO_MIRROR' for DinD"
cat <<EOF > /etc/docker/daemon.json.tmp
@@ -225,29 +289,8 @@ EOF
fi
}
-function e2e::up() {
- if [ -n "$SKIP_UP" ]; then
- echo "info: skip starting a new cluster"
- return
- fi
- if [ -n "$DOCKER_IO_MIRROR" -a -n "${DOCKER_IN_DOCKER_ENABLED:-}" ]; then
- e2e::__configure_docker_mirror_for_dind
- fi
- if e2e::cluster_exists $CLUSTER; then
- if [ -n "$REUSE_CLUSTER" ]; then
- if e2e::__cluster_is_alive; then
- echo "info: REUSE_CLUSTER is enabled and the cluster is alive, reusing it"
- return
- else
- echo "info: REUSE_CLUSTER is enabled but the cluster is not alive, trying to recreate it"
- fi
- fi
- echo "info: deleting the cluster '$CLUSTER'"
- $KIND_BIN delete cluster --name $CLUSTER
- fi
- echo "info: starting a new cluster"
- tmpfile=$(mktemp)
- trap "test -f $tmpfile && rm $tmpfile" RETURN
+function e2e::create_kindconfig() {
+ local tmpfile=${1}
cat <<EOF > $tmpfile
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
@@ -315,137 +358,181 @@ EOF
EOF
fi
}
+}
+
+hack::ensure_kind
+hack::ensure_kubectl
+hack::ensure_helm
+
+e2e::image_build
+
+if [ -n "$DOCKER_IO_MIRROR" -a -n "${DOCKER_IN_DOCKER_ENABLED:-}" ]; then
+ e2e::__configure_docker_mirror_for_dind
+fi
+
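+# kubetest2 drives the cluster lifecycle: --up creates the cluster, --down
+# tears it down, and "--test exec" runs the e2e command assembled below.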
+kubetest2_args=(
+ $PROVIDER
+)
+
+if [ -n "$RUNNER_SUITE_NAME" ]; then
+ kubetest2_args+=(
+ --suite-name "$RUNNER_SUITE_NAME"
+ )
+fi
+
+if [ -z "$SKIP_UP" ]; then
+ kubetest2_args+=(--up)
+fi
+
+if [ -z "$SKIP_DOWN" ]; then
+ kubetest2_args+=(--down)
+fi
+
+if [ -z "$SKIP_TEST" ]; then
+ kubetest2_args+=(--test exec)
+fi
+
+if [ "$PROVIDER" == "kind" ]; then
+ tmpfile=$(mktemp)
+ trap "test -f $tmpfile && rm $tmpfile" EXIT
+ e2e::create_kindconfig $tmpfile
echo "info: print the contents of kindconfig"
cat $tmpfile
- echo "info: end of the contents of kindconfig"
- echo "info: creating the cluster '$CLUSTER'"
- local image=""
+ image=""
for v in ${!kind_node_images[*]}; do
- if [[ "$KUBE_VERSION" == "$v" ]]; then
+ if [[ "$KUBE_VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ && "$KUBE_VERSION" == "$v" ]]; then
+ image=${kind_node_images[$v]}
+ echo "info: image for $KUBE_VERSION: $image"
+ elif [[ "$KUBE_VERSION" =~ ^v[0-9]+\.[0-9]+$ && "$KUBE_VERSION" == "${v%.*}" ]]; then
image=${kind_node_images[$v]}
echo "info: image for $KUBE_VERSION: $image"
- break
fi
done
if [ -z "$image" ]; then
echo "error: no image for $KUBE_VERSION, exit"
exit 1
fi
- # Retry on error. Sometimes, kind will fail with the following error:
- #
- # OCI runtime create failed: container_linux.go:346: starting container process caused "process_linux.go:319: getting the final child's pid from pipe caused \"EOF\"": unknown
- #
- # TODO this error should be related to docker or linux kernel, find the root cause.
- hack::wait_for_success 120 5 "$KIND_BIN create cluster --config $KUBECONFIG --name $CLUSTER --image $image --config $tmpfile -v 4"
- # make it able to schedule pods on control-plane, then less resources we required
- # This is disabled because when hostNetwork is used, pd requires 2379/2780
- # which may conflict with etcd on control-plane.
- #echo "info: remove 'node-role.kubernetes.io/master' taint from $CLUSTER-control-plane"
- #kubectl taint nodes $CLUSTER-control-plane node-role.kubernetes.io/master-
-}
-
-function e2e::__wait_for_ds() {
- local ns="$1"
- local name="$2"
- local retries="${3:-300}"
- echo "info: waiting for pods of daemonset $ns/$name are ready (retries: $retries, interval: 1s)"
- for ((i = 0; i < retries; i++)) {
- read a b <<<$($KUBECTL_BIN --context $KUBECONTEXT -n $ns get ds/$name -ojsonpath='{.status.desiredNumberScheduled} {.status.numberReady}{"\n"}')
- if [[ "$a" -gt 0 && "$a" -eq "$b" ]]; then
- echo "info: all pods of daemonset $ns/$name are ready (desired: $a, ready: $b)"
- return 0
- fi
- echo "info: pods of daemonset $ns/$name (desired: $a, ready: $b)"
- sleep 1
- }
- echo "info: timed out waiting for pods of daemonset $ns/$name are ready"
- return 1
-}
-
-function e2e::__wait_for_deploy() {
- local ns="$1"
- local name="$2"
- local retries="${3:-300}"
- echo "info: waiting for pods of deployment $ns/$name are ready (retries: $retries, interval: 1s)"
- for ((i = 0; i < retries; i++)) {
- read a b <<<$($KUBECTL_BIN --context $KUBECONTEXT -n $ns get deploy/$name -ojsonpath='{.spec.replicas} {.status.readyReplicas}{"\n"}')
- if [[ "$a" -gt 0 && "$a" -eq "$b" ]]; then
- echo "info: all pods of deployment $ns/$name are ready (desired: $a, ready: $b)"
- return 0
- fi
- echo "info: pods of deployment $ns/$name (desired: $a, ready: $b)"
- sleep 1
- }
- echo "info: timed out waiting for pods of deployment $ns/$name are ready"
- return 1
-}
-
-function e2e::setup_local_pvs() {
- echo "info: preparing disks"
- for n in $($KIND_BIN get nodes --name=$CLUSTER); do
- docker exec -i $n bash <<'EOF'
-test -d /mnt/disks || mkdir -p /mnt/disks
-df -h /mnt/disks
-if mountpoint /mnt/disks &>/dev/null; then
- echo "info: /mnt/disks is a mountpoint"
-else
- echo "info: /mnt/disks is not a mountpoint, creating local volumes on the rootfs"
-fi
-cd /mnt/disks
-for ((i = 1; i <= 32; i++)) {
- if [ ! -d vol$i ]; then
- mkdir vol$i
+ kubetest2_args+=(--image-name $image)
+ kubetest2_args+=(
+ # add some retries because kind may fail to start the cluster when the
+ # load is high
+ --up-retries 3
+ --cluster-name "$CLUSTER"
+ --config "$tmpfile"
+ --verbosity 4
+ )
+elif [ "$PROVIDER" == "gke" ]; then
+ if [ -z "$GCP_PROJECT" ]; then
+ echo "error: GCP_PROJECT is required"
+ exit 1
fi
- if ! mountpoint vol$i &>/dev/null; then
- mount --bind vol$i vol$i
+ if [ -z "$GCP_CREDENTIALS" ]; then
+ echo "error: GCP_CREDENTIALS is required"
+ exit 1
fi
-}
+ if [ -z "$GCP_REGION" -a -z "$GCP_ZONE" ]; then
+ echo "error: either GCP_REGION or GCP_ZONE must be specified"
+ exit 1
+ elif [ -n "$GCP_REGION" -a -n "$GCP_ZONE" ]; then
+ echo "error: GCP_REGION or GCP_ZONE cannot be both set"
+ exit 1
+ fi
+ echo "info: preparing ssh keypairs for GCP"
+ if [ ! -d ~/.ssh ]; then
+ mkdir ~/.ssh
+ fi
+ if [ ! -e ~/.ssh/google_compute_engine -a -n "$GCP_SSH_PRIVATE_KEY" ]; then
+ echo "Copying $GCP_SSH_PRIVATE_KEY to ~/.ssh/google_compute_engine" >&2
+ cp $GCP_SSH_PRIVATE_KEY ~/.ssh/google_compute_engine
+ chmod 0600 ~/.ssh/google_compute_engine
+ fi
+ if [ ! -e ~/.ssh/google_compute_engine.pub -a -n "$GCP_SSH_PUBLIC_KEY" ]; then
+ echo "Copying $GCP_SSH_PUBLIC_KEY to ~/.ssh/google_compute_engine.pub" >&2
+ cp $GCP_SSH_PUBLIC_KEY ~/.ssh/google_compute_engine.pub
+ chmod 0600 ~/.ssh/google_compute_engine.pub
+ fi
+ ! read -r -d '' nodePoolsJSON <&1 | awk '{print $2}')
+ [[ "$tmpv" == "$v" ]]
+ return
+ fi
+ return 1
+}
+
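+# Download the kubetest2 binary into $OUTPUT_BIN if the expected version is not
+# already installed.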
+function hack::__ensure_kubetest2() {
+ local n="$1"
+ if hack::__verify_kubetest2 $n $KUBETEST2_VERSION; then
+ return 0
+ fi
+ local tmpfile=$(mktemp)
+ trap "test -f $tmpfile && rm $tmpfile" RETURN
+ echo "info: downloading $n $KUBETEST2_VERSION"
+ curl --retry 10 -L -o - https://github.com/cofyc/kubetest2/releases/download/$KUBETEST2_VERSION/$n-$OS-$ARCH.gz | gunzip > $tmpfile
+ mv $tmpfile $OUTPUT_BIN/$n
+ chmod +x $OUTPUT_BIN/$n
+}
+
+function hack::ensure_kubetest2() {
+ hack::__ensure_kubetest2 kubetest2
+ hack::__ensure_kubetest2 kubetest2-gke
+ hack::__ensure_kubetest2 kubetest2-kind
+ hack::__ensure_kubetest2 kubetest2-eks
+}
+
+function hack::verify_aws_k8s_tester() {
+ if test -x $AWS_K8S_TESTER_BIN; then
+ [[ "$($AWS_K8S_TESTER_BIN version | jq '."release-version"' -r)" == "$AWS_K8S_TESTER_VERSION" ]]
+ return
+ fi
+ return 1
+}
+
+function hack::ensure_aws_k8s_tester() {
+ if hack::verify_aws_k8s_tester; then
+ return
+ fi
+ local DOWNLOAD_URL=https://github.com/aws/aws-k8s-tester/releases/download
+ local tmpfile=$(mktemp)
+ trap "test -f $tmpfile && rm $tmpfile" RETURN
+ curl --retry 10 -L -o $tmpfile https://github.com/aws/aws-k8s-tester/releases/download/$AWS_K8S_TESTER_VERSION/aws-k8s-tester-$AWS_K8S_TESTER_VERSION-$OS-$ARCH
+ mv $tmpfile $AWS_K8S_TESTER_BIN
+ chmod +x $AWS_K8S_TESTER_BIN
+}
+
+function hack::verify_gen_crd_api_references_docs() {
+ if test -x "$DOCS_BIN"; then
+ # TODO check version when the binary version is available.
+ return
+ fi
+ return 1
+}
+
+function hack::ensure_gen_crd_api_references_docs() {
+ if hack::verify_gen_crd_api_references_docs; then
+ return 0
+ fi
+ echo "Installing gen_crd_api_references_docs v$DOCS_VERSION..."
+ tmpdir=$(mktemp -d)
+ trap "test -d $tmpdir && rm -r $tmpdir" RETURN
+ curl --retry 10 -L -o ${tmpdir}/docs-bin.tar.gz https://github.com/ahmetb/gen-crd-api-reference-docs/releases/download/v${DOCS_VERSION}/gen-crd-api-reference-docs_${OS}_${ARCH}.tar.gz
+ tar -zvxf ${tmpdir}/docs-bin.tar.gz -C ${tmpdir}
+ mv ${tmpdir}/gen-crd-api-reference-docs ${DOCS_BIN}
+ chmod +x ${DOCS_BIN}
+}
diff --git a/hack/local-up-operator.sh b/hack/local-up-operator.sh
new file mode 100755
index 0000000000..2d8bdac03a
--- /dev/null
+++ b/hack/local-up-operator.sh
@@ -0,0 +1,188 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This command runs tidb-operator in Kubernetes.
+#
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
+cd $ROOT
+
+source "${ROOT}/hack/lib.sh"
+
+function usage() {
+ cat <<'EOF'
+This command runs tidb-operator in Kubernetes.
+
+Usage: hack/local-up-operator.sh [-hi]
+
+ -h show this message and exit
+ -i install dependencies only
+
+Environments:
+
+ PROVIDER Kubernetes provider. Defaults: kind.
+ CLUSTER the name of e2e cluster. Defaults to kind for kind provider.
+ KUBECONFIG path to the kubeconfig file, defaults: ~/.kube/config
+ KUBECONTEXT context in kubeconfig file, defaults to current context
+ NAMESPACE Kubernetes namespace in which we run our tidb-operator.
+ DOCKER_REGISTRY image docker registry
+ IMAGE_TAG image tag
+ SKIP_IMAGE_BUILD skip build and push images
+
+EOF
+}
+
+installOnly=false
+while getopts "h?i" opt; do
+ case "$opt" in
+ h|\?)
+ usage
+ exit 0
+ ;;
+ i)
+ installOnly=true
+ ;;
+ esac
+done
+
+PROVIDER=${PROVIDER:-kind}
+CLUSTER=${CLUSTER:-}
+KUBECONFIG=${KUBECONFIG:-~/.kube/config}
+KUBECONTEXT=${KUBECONTEXT:-}
+NAMESPACE=${NAMESPACE:-pingcap}
+DOCKER_REGISTRY=${DOCKER_REGISTRY:-localhost:5000}
+IMAGE_TAG=${IMAGE_TAG:-latest}
+SKIP_IMAGE_BUILD=${SKIP_IMAGE_BUILD:-}
+
+hack::ensure_kubectl
+hack::ensure_kind
+hack::ensure_helm
+
+if [[ "$installOnly" == "true" ]]; then
+ exit 0
+fi
+
+function hack::create_namespace() {
+ local ns="$1"
+ $KUBECTL_BIN create namespace $ns
+ for ((i=0; i < 30; i++)); do
+ local phase=$(kubectl get ns $ns -ojsonpath='{.status.phase}')
+ if [ "$phase" == "Active" ]; then
+ return 0
+ fi
+ sleep 1
+ done
+ return 1
+}
+
+function hack::wait_for_deploy() {
+ local ns="$1"
+ local name="$2"
+ local retries="${3:-300}"
+ echo "info: waiting for pods of deployment $ns/$name are ready (retries: $retries, interval: 1s)"
+ for ((i = 0; i < retries; i++)) {
+ read a b <<<$($KUBECTL_BIN --context $KUBECONTEXT -n $ns get deploy/$name -ojsonpath='{.spec.replicas} {.status.readyReplicas}{"\n"}')
+ if [[ "$a" -gt 0 && "$a" -eq "$b" ]]; then
+ echo "info: all pods of deployment $ns/$name are ready (desired: $a, ready: $b)"
+ return 0
+ fi
+ echo "info: pods of deployment $ns/$name (desired: $a, ready: $b)"
+ sleep 1
+ }
+ echo "info: timed out waiting for pods of deployment $ns/$name are ready"
+ return 1
+}
+
+function hack::cluster_exists() {
+ local c="$1"
+ for n in $($KIND_BIN get clusters); do
+ if [ "$n" == "$c" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+echo "info: checking clusters"
+
+if [ "$PROVIDER" == "kind" ]; then
+ if [ -z "$CLUSTER" ]; then
+ CLUSTER=kind
+ fi
+ if ! hack::cluster_exists "$CLUSTER"; then
+ echo "error: kind cluster '$CLUSTER' not found, please create it or specify the right cluster name with CLUSTER environment"
+ exit 1
+ fi
+else
+ echo "erorr: only kind PROVIDER is supported"
+ exit 1
+fi
+
+if [ -z "$KUBECONTEXT" ]; then
+ KUBECONTEXT=$(kubectl config current-context)
+ echo "info: KUBECONTEXT is not set, current context $KUBECONTEXT is used"
+fi
+
+if [ -z "$SKIP_IMAGE_BUILD" ]; then
+ echo "info: building docker images"
+ DOCKER_REGISTRY=$DOCKER_REGISTRY IMAGE_TAG=$IMAGE_TAG make docker
+else
+ echo "info: skip building docker images"
+fi
+
+echo "info: loading images into cluster"
+images=(
+ $DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG}
+)
+for n in ${images[@]}; do
+ echo "info: loading image $n"
+ $KIND_BIN load docker-image --name $CLUSTER $n
+done
+
+echo "info: uninstall tidb-operator"
+$KUBECTL_BIN -n "$NAMESPACE" delete deploy -l app.kubernetes.io/name=tidb-operator
+$KUBECTL_BIN -n "$NAMESPACE" delete pods -l app.kubernetes.io/name=tidb-operator
+
+echo "info: create namespace '$NAMESPACE' if absent"
+if ! $KUBECTL_BIN get ns "$NAMESPACE" &>/dev/null; then
+ hack::create_namespace "$NAMESPACE"
+fi
+
+echo "info: installing crds"
+$KUBECTL_BIN apply -f manifests/crd.yaml
+
+echo "info: deploying tidb-operator"
+helm_args=(
+ template
+ --name tidb-operator-dev
+ --namespace "$NAMESPACE"
+ --set operatorImage=$DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG}
+)
+
+$HELM_BIN ${helm_args[@]} ./charts/tidb-operator/ | kubectl -n "$NAMESPACE" apply -f -
+
+deploys=(
+ tidb-controller-manager
+ tidb-scheduler
+)
+for deploy in ${deploys[@]}; do
+ echo "info: waiting for $NAMESPACE/$deploy to be ready"
+ hack::wait_for_deploy "$NAMESPACE" "$deploy"
+done
diff --git a/hack/prepare-e2e.sh b/hack/prepare-e2e.sh
index fb32d6c531..1f35640779 100755
--- a/hack/prepare-e2e.sh
+++ b/hack/prepare-e2e.sh
@@ -30,3 +30,4 @@ source "${ROOT}/hack/lib.sh"
hack::ensure_kind
hack::ensure_kubectl
hack::ensure_helm
+hack::ensure_kubetest2
diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh
index e8470676ad..97961d7a58 100755
--- a/hack/run-e2e.sh
+++ b/hack/run-e2e.sh
@@ -22,33 +22,277 @@ cd $ROOT
source $ROOT/hack/lib.sh
-hack::ensure_kubectl
-hack::ensure_helm
-
+PROVIDER=${PROVIDER:-}
+CLUSTER=${CLUSTER:-}
+GCP_PROJECT=${GCP_PROJECT:-}
+GCP_REGION=${GCP_REGION:-}
+GCP_ZONE=${GCP_ZONE:-}
+GCP_CREDENTIALS=${GCP_CREDENTIALS:-}
+GCP_SDK=${GCP_SDK:-/google-cloud-sdk}
+KUBE_SSH_USER=${KUBE_SSH_USER:-vagrant}
+IMAGE_TAG=${IMAGE_TAG:-}
+SKIP_IMAGE_LOAD=${SKIP_IMAGE_LOAD:-}
TIDB_OPERATOR_IMAGE=${TIDB_OPERATOR_IMAGE:-localhost:5000/pingcap/tidb-operator:latest}
+TIDB_BACKUP_MANAGER_IMAGE=${TIDB_BACKUP_MANAGER_IMAGE:-localhost:5000/pingcap/tidb-backup-manager:latest}
E2E_IMAGE=${E2E_IMAGE:-localhost:5000/pingcap/tidb-operator-e2e:latest}
KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config}
KUBECONTEXT=${KUBECONTEXT:-}
REPORT_DIR=${REPORT_DIR:-}
REPORT_PREFIX=${REPORT_PREFIX:-}
+GINKGO_NODES=${GINKGO_NODES:-}
+GINKGO_PARALLEL=${GINKGO_PARALLEL:-n} # set to 'y' to run tests in parallel
+# If 'y', Ginkgo's reporter will not print out in color when tests are run
+# in parallel
+GINKGO_NO_COLOR=${GINKGO_NO_COLOR:-n}
+GINKGO_STREAM=${GINKGO_STREAM:-y}
+SKIP_GINKGO=${SKIP_GINKGO:-}
if [ -z "$KUBECONFIG" ]; then
echo "error: KUBECONFIG is required"
exit 1
fi
+echo "KUBE_SSH_USER: $KUBE_SSH_USER"
echo "TIDB_OPERATOR_IMAGE: $TIDB_OPERATOR_IMAGE"
+echo "TIDB_BACKUP_MANAGER_IMAGE: $TIDB_BACKUP_MANAGER_IMAGE"
echo "E2E_IMAGE: $E2E_IMAGE"
echo "KUBECONFIG: $KUBECONFIG"
echo "KUBECONTEXT: $KUBECONTEXT"
echo "REPORT_DIR: $REPORT_DIR"
echo "REPORT_PREFIX: $REPORT_PREFIX"
+echo "GINKGO_NODES: $GINKGO_NODES"
+echo "GINKGO_PARALLEL: $GINKGO_PARALLEL"
+echo "GINKGO_NO_COLOR: $GINKGO_NO_COLOR"
+echo "GINKGO_STREAM: $GINKGO_STREAM"
-GINKGO_PARALLEL=${GINKGO_PARALLEL:-n} # set to 'y' to run tests in parallel
-# If 'y', Ginkgo's reporter will not print out in color when tests are run
-# in parallel
-GINKGO_NO_COLOR=${GINKGO_NO_COLOR:-n}
-GINKGO_STREAM=${GINKGO_STREAM:-y}
+function e2e::__wait_for_ds() {
+ local ns="$1"
+ local name="$2"
+ local retries="${3:-300}"
+ echo "info: waiting for pods of daemonset $ns/$name are ready (retries: $retries, interval: 1s)"
+ for ((i = 0; i < retries; i++)) {
+ read a b <<<$($KUBECTL_BIN --context $KUBECONTEXT -n $ns get ds/$name -ojsonpath='{.status.desiredNumberScheduled} {.status.numberReady}{"\n"}')
+ if [[ "$a" -gt 0 && "$a" -eq "$b" ]]; then
+ echo "info: all pods of daemonset $ns/$name are ready (desired: $a, ready: $b)"
+ return 0
+ fi
+ echo "info: pods of daemonset $ns/$name (desired: $a, ready: $b)"
+ sleep 1
+ }
+ echo "info: timed out waiting for pods of daemonset $ns/$name are ready"
+ return 1
+}
+
+function e2e::__wait_for_deploy() {
+ local ns="$1"
+ local name="$2"
+ local retries="${3:-300}"
+ echo "info: waiting for pods of deployment $ns/$name are ready (retries: $retries, interval: 1s)"
+ for ((i = 0; i < retries; i++)) {
+ read a b <<<$($KUBECTL_BIN --context $KUBECONTEXT -n $ns get deploy/$name -ojsonpath='{.spec.replicas} {.status.readyReplicas}{"\n"}')
+ if [[ "$a" -gt 0 && "$a" -eq "$b" ]]; then
+ echo "info: all pods of deployment $ns/$name are ready (desired: $a, ready: $b)"
+ return 0
+ fi
+ echo "info: pods of deployment $ns/$name (desired: $a, ready: $b)"
+ sleep 1
+ }
+ echo "info: timed out waiting for pods of deployment $ns/$name are ready"
+ return 1
+}
+
+function e2e::setup_local_pvs() {
+ echo "info: preparing local disks"
+ if [ "$PROVIDER" == "kind" ]; then
+ for n in $($KIND_BIN get nodes --name=$CLUSTER); do
+ docker exec -i $n bash <<'EOF'
+test -d /mnt/disks || mkdir -p /mnt/disks
+df -h /mnt/disks
+if mountpoint /mnt/disks &>/dev/null; then
+ echo "info: /mnt/disks is a mountpoint"
+else
+ echo "info: /mnt/disks is not a mountpoint, creating local volumes on the rootfs"
+fi
+cd /mnt/disks
+for ((i = 1; i <= 32; i++)) {
+ if [ ! -d vol$i ]; then
+ mkdir vol$i
+ fi
+ if ! mountpoint vol$i &>/dev/null; then
+ mount --bind vol$i vol$i
+ fi
+}
+EOF
+ done
+ elif [ "$PROVIDER" == "gke" ]; then
+ echo "info: provider is $PROVIDER, skipped"
+ elif [ "$PROVIDER" == "eks" ]; then
+ echo "info: provider is $PROVIDER, skipped"
+ elif [ "$PROVIDER" == "openshift" ]; then
+ CRC_IP=$(crc ip)
+ ssh -i ~/.crc/machines/crc/id_rsa -o StrictHostKeyChecking=no core@$CRC_IP <<'EOF'
+sudo bash -c '
+test -d /mnt/disks || mkdir -p /mnt/disks
+df -h /mnt/disks
+if mountpoint /mnt/disks &>/dev/null; then
+ echo "info: /mnt/disks is a mountpoint"
+else
+ echo "info: /mnt/disks is not a mountpoint, creating local volumes on the rootfs"
+fi
+cd /mnt/disks
+for ((i = 1; i <= 32; i++)) {
+ if [ ! -d vol$i ]; then
+ mkdir vol$i
+ fi
+ if ! mountpoint vol$i &>/dev/null; then
+ mount --bind vol$i vol$i
+ fi
+}
+'
+EOF
+ fi
+ echo "info: installing local-volume-provisioner"
+ $KUBECTL_BIN --context $KUBECONTEXT apply -f ${ROOT}/manifests/local-dind/local-volume-provisioner.yaml
+ e2e::__wait_for_ds kube-system local-volume-provisioner
+}
+
+function e2e::__ecr_url() {
+ local account_id=$(aws sts get-caller-identity --output text | awk '{print $1}')
+ local region=$(aws configure get region)
+ echo "${account_id}.dkr.ecr.${region}.amazonaws.com"
+}
+
+function e2e::get_kube_version() {
+ $KUBECTL_BIN --context $KUBECONTEXT version --short | awk '/Server Version:/ {print $3}'
+}
+
+function e2e::setup_helm_server() {
+ $KUBECTL_BIN --context $KUBECONTEXT apply -f ${ROOT}/manifests/tiller-rbac.yaml
+ if hack::version_ge $(e2e::get_kube_version) "v1.16.0"; then
+ # workaround for https://github.com/helm/helm/issues/6374
+ # TODO remove this when we can upgrade to helm 2.15+, see https://github.com/helm/helm/pull/6462
+ $HELM_BIN init --service-account tiller --output yaml \
+ | sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' \
+ | sed 's@ replicas: 1@ replicas: 1\n selector: {"matchLabels": {"app": "helm", "name": "tiller"}}@' \
+ | $KUBECTL_BIN --context $KUBECONTEXT apply -f -
+ echo "info: wait for tiller to be ready"
+ e2e::__wait_for_deploy kube-system tiller-deploy
+ else
+ $HELM_BIN init --service-account=tiller --wait
+ fi
+ $HELM_BIN version
+}
+
+# Used by non-kind providers to tag the image with its ID. This forces our e2e
+# process to pull the correct image even if IfNotPresent is used in an existing
+# environment, e.g. testing in the same cluster.
+function e2e::image_id_tag() {
+ docker image inspect -f '{{.Id}}' "$1" | cut -d ':' -f 2 | head -c 10
+}
+
+function e2e::image_load() {
+ local images=(
+ $TIDB_OPERATOR_IMAGE
+ $TIDB_BACKUP_MANAGER_IMAGE
+ $E2E_IMAGE
+ )
+ if [ "$PROVIDER" == "kind" ]; then
+ local nodes=$($KIND_BIN get nodes --name $CLUSTER | grep -v 'control-plane$')
+ echo "info: load images ${images[@]}"
+ for n in ${images[@]}; do
+ $KIND_BIN load docker-image --name $CLUSTER $n --nodes $(hack::join ',' ${nodes[@]})
+ done
+ elif [ "$PROVIDER" == "gke" ]; then
+ unset DOCKER_CONFIG # We don't need this and it may be read-only, which would cause the command to fail
+ gcloud auth configure-docker
+ GCP_TIDB_OPERATOR_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator:$CLUSTER-$(e2e::image_id_tag $TIDB_OPERATOR_IMAGE)
+ GCP_TIDB_BACKUP_MANAGER_IMAGE=gcr.io/$GCP_PROJECT/tidb-backup-image:$CLUSTER-$(e2e::image_id_tag $TIDB_BACKUP_MANAGER_IMAGE)
+ GCP_E2E_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator-e2e:$CLUSTER-$(e2e::image_id_tag $E2E_IMAGE)
+ docker tag $TIDB_OPERATOR_IMAGE $GCP_TIDB_OPERATOR_IMAGE
+ docker tag $E2E_IMAGE $GCP_E2E_IMAGE
+ docker tag $TIDB_BACKUP_MANAGER_IMAGE $GCP_TIDB_BACKUP_MANAGER_IMAGE
+ echo "info: pushing $GCP_TIDB_OPERATOR_IMAGE"
+ docker push $GCP_TIDB_OPERATOR_IMAGE
+ echo "info: pushing $GCP_E2E_IMAGE"
+ docker push $GCP_E2E_IMAGE
+ echo "info: pushing $GCP_TIDB_BACKUP_MANAGER_IMAGE"
+ docker push $GCP_TIDB_BACKUP_MANAGER_IMAGE
+ TIDB_OPERATOR_IMAGE=$GCP_TIDB_OPERATOR_IMAGE
+ E2E_IMAGE=$GCP_E2E_IMAGE
+ TIDB_BACKUP_MANAGER_IMAGE=$GCP_TIDB_BACKUP_MANAGER_IMAGE
+ elif [ "$PROVIDER" == "eks" ]; then
+ for repoName in e2e/tidb-operator e2e/tidb-operator-e2e e2e/tidb-backup-manager; do
+ local ret=0
+ aws ecr describe-repositories --repository-names $repoName || ret=$?
+ if [ $ret -ne 0 ]; then
+ echo "info: creating repository $repoName"
+ aws ecr create-repository --repository-name $repoName
+ fi
+ done
+ local ecrURL=$(e2e::__ecr_url)
+ echo "info: logging in $ecrURL"
+ aws ecr get-login-password | docker login --username AWS --password-stdin $ecrURL
+ AWS_TIDB_OPERATOR_IMAGE=$ecrURL/e2e/tidb-operator:$CLUSTER-$(e2e::image_id_tag $TIDB_OPERATOR_IMAGE)
+ AWS_TIDB_BACKUP_MANAGER_IMAGE=$ecrURL/e2e/tidb-backup-manager:$CLUSTER-$(e2e::image_id_tag $TIDB_BACKUP_MANAGER_IMAGE)
+ AWS_E2E_IMAGE=$ecrURL/e2e/tidb-operator-e2e:$CLUSTER-$(e2e::image_id_tag $E2E_IMAGE)
+ docker tag $TIDB_OPERATOR_IMAGE $AWS_TIDB_OPERATOR_IMAGE
+ docker tag $TIDB_BACKUP_MANAGER_IMAGE $AWS_TIDB_BACKUP_MANAGER_IMAGE
+ docker tag $E2E_IMAGE $AWS_E2E_IMAGE
+ echo "info: pushing $AWS_TIDB_OPERATOR_IMAGE"
+ docker push $AWS_TIDB_OPERATOR_IMAGE
+ echo "info: pushing $AWS_TIDB_BACKUP_MANAGER_IMAGE"
+ docker push $AWS_TIDB_BACKUP_MANAGER_IMAGE
+ echo "info: pushing $AWS_E2E_IMAGE"
+ docker push $AWS_E2E_IMAGE
+ TIDB_BACKUP_MANAGER_IMAGE=$AWS_TIDB_BACKUP_MANAGER_IMAGE
+ TIDB_OPERATOR_IMAGE=$AWS_TIDB_OPERATOR_IMAGE
+ E2E_IMAGE=$AWS_E2E_IMAGE
+ else
+ echo "info: unsupported provider '$PROVIDER', skip loading images"
+ fi
+}
+
+hack::ensure_kubectl
+hack::ensure_helm
+
+if [ "$PROVIDER" == "gke" ]; then
+ if [ -n "$GCP_CREDENTIALS" ]; then
+ gcloud auth activate-service-account --key-file "$GCP_CREDENTIALS"
+ fi
+ if [ -n "$GCP_REGION" ]; then
+ gcloud config set compute/region "$GCP_REGION"
+ fi
+ if [ -n "$GCP_ZONE" ]; then
+ gcloud config set compute/zone "$GCP_ZONE"
+ fi
+ gcloud container clusters get-credentials "$CLUSTER"
+elif [ "$PROVIDER" == "eks" ]; then
+ aws eks update-kubeconfig --name "$CLUSTER"
+fi
+
+if [ -z "$KUBECONTEXT" ]; then
+ echo "info: KUBECONTEXT is not set, current context is used"
+ KUBECONTEXT=$($KUBECTL_BIN config current-context 2>/dev/null) || true
+ if [ -z "$KUBECONTEXT" ]; then
+ echo "error: current context cannot be detected"
+ exit 1
+ fi
+ echo "info: current kubeconfig context is '$KUBECONTEXT'"
+fi
+
+if [ -z "$SKIP_IMAGE_LOAD" ]; then
+ e2e::image_load
+fi
+
+e2e::setup_local_pvs
+e2e::setup_helm_server
+
+if [ -n "$SKIP_GINKGO" ]; then
+ echo "info: skipping ginkgo"
+ exit 0
+fi
+
+echo "info: start to run e2e process"
ginkgo_args=()
@@ -66,33 +310,25 @@ if [[ "${GINKGO_STREAM}" == "y" ]]; then
ginkgo_args+=("--stream")
fi
-echo "info: start to run e2e process"
e2e_args=(
/usr/local/bin/ginkgo
${ginkgo_args[@]:-}
/usr/local/bin/e2e.test
--
- --provider=skeleton
--clean-start=true
--delete-namespace-on-failure=false
- --repo-root=$ROOT
+ --repo-root="$ROOT"
# tidb-operator e2e flags
--operator-tag=e2e
- --operator-image=${TIDB_OPERATOR_IMAGE}
- --e2e-image=${E2E_IMAGE}
+ --operator-image="${TIDB_OPERATOR_IMAGE}"
+ --backup-image="${TIDB_BACKUP_MANAGER_IMAGE}"
+ --e2e-image="${E2E_IMAGE}"
# two tidb versions can be configured (comma-separated)
--tidb-versions=v3.0.7,v3.0.8
--chart-dir=/charts
-v=4
)
-if [ -n "$REPORT_DIR" ]; then
- e2e_args+=(
- --report-dir="${REPORT_DIR}"
- --report-prefix="${REPORT_PREFIX}"
- )
-fi
-
e2e_args+=(${@:-})
docker_args=(
@@ -106,9 +342,53 @@ docker_args=(
-v $KUBECONFIG:/etc/kubernetes/admin.conf:ro
--env KUBECONFIG=/etc/kubernetes/admin.conf
--env KUBECONTEXT=$KUBECONTEXT
+ --env KUBE_SSH_USER=$KUBE_SSH_USER
)
+if [ "$PROVIDER" == "eks" ]; then
+ e2e_args+=(
+ --provider=aws
+ --gce-zone="${AWS_ZONE}" # reuse gce-zone to configure aws zone
+ )
+ docker_args+=(
+ # aws credential is required to get token for EKS
+ -v $HOME/.aws:/root/.aws
+ # ~/.ssh/kube_aws_rsa must be mounted into e2e container to run ssh
+ -v $HOME/.ssh/kube_aws_rsa:/root/.ssh/kube_aws_rsa
+ )
+elif [ "$PROVIDER" == "gke" ]; then
+ e2e_args+=(
+ --provider="${PROVIDER}"
+ --gce-project="${GCP_PROJECT}"
+ --gce-region="${GCP_REGION}"
+ --gce-zone="${GCP_ZONE}"
+ )
+ docker_args+=(
+ -v ${GCP_CREDENTIALS}:${GCP_CREDENTIALS}
+ --env GOOGLE_APPLICATION_CREDENTIALS=${GCP_CREDENTIALS}
+ )
+ # google-cloud-sdk is very large, so we didn't pack it into our e2e image.
+ # Instead, we use the SDK installed in the CI image.
+ if [ ! -e "${GCP_SDK}/bin/gcloud" ]; then
+ echo "error: ${GCP_SDK} is not google cloud sdk, please install it here or specify correct path via GCP_SDK env"
+ exit 1
+ fi
+ docker_args+=(
+ -v ${GCP_SDK}:/google-cloud-sdk
+ # ~/.ssh/google_compute_engine must be mounted into e2e container to run ssh
+ -v $HOME/.ssh/google_compute_engine:/root/.ssh/google_compute_engine
+ )
+else
+ e2e_args+=(
+ --provider="${PROVIDER}"
+ )
+fi
+
if [ -n "$REPORT_DIR" ]; then
+ e2e_args+=(
+ --report-dir="${REPORT_DIR}"
+ --report-prefix="${REPORT_PREFIX}"
+ )
docker_args+=(
-v $REPORT_DIR:$REPORT_DIR
)
diff --git a/hack/run-in-container.sh b/hack/run-in-container.sh
index d99204bbb9..317751f8e3 100755
--- a/hack/run-in-container.sh
+++ b/hack/run-in-container.sh
@@ -78,15 +78,20 @@ fi
args=(bash)
if [ $# -gt 0 ]; then
- args=($@)
+ args=("$@")
fi
docker_args=(
- -it --rm
+ --rm
-h $NAME
--name $NAME
)
+if [ -t 1 ]; then
+ # Allocate a pseudo-TTY when the STDIN is a terminal
+ docker_args+=(-it)
+fi
+
# required by dind
docker_args+=(
--privileged
@@ -139,5 +144,5 @@ docker run ${docker_args[@]} \
-v $ROOT:/go/src/github.com/pingcap/tidb-operator \
-w /go/src/github.com/pingcap/tidb-operator \
--entrypoint /usr/local/bin/runner.sh \
- gcr.io/k8s-testimages/kubekins-e2e:v20191108-9467d02-master \
+ gcr.io/k8s-testimages/kubekins-e2e:v20200311-1e25827-master \
"${args[@]}"
diff --git a/hack/update-crd-groups.sh b/hack/update-crd-groups.sh
index 1b1a4df4db..b55f35302e 100755
--- a/hack/update-crd-groups.sh
+++ b/hack/update-crd-groups.sh
@@ -39,3 +39,15 @@ to-crdgen generate backupschedule >> $crd_target
to-crdgen generate tidbmonitor >> $crd_target
to-crdgen generate tidbinitializer >> $crd_target
to-crdgen generate tidbclusterautoscaler >> $crd_target
+
+
+
+hack::ensure_gen_crd_api_references_docs
+
+DOCS_PATH="$ROOT/docs/api-references"
+
+${DOCS_BIN} \
+-config "$DOCS_PATH/config.json" \
+-template-dir "$DOCS_PATH/template" \
+-api-dir "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1" \
+-out-file "$DOCS_PATH/docs.md"
diff --git a/hack/verify-crd-groups.sh b/hack/verify-crd-groups.sh
index ca6ed42fb3..5d6cd11654 100755
--- a/hack/verify-crd-groups.sh
+++ b/hack/verify-crd-groups.sh
@@ -24,7 +24,12 @@ target="manifests/crd.yaml"
verify_tmp=$(mktemp)
trap "rm -f $verify_tmp" EXIT
+targetDocs="$ROOT/docs/api-references/docs.md"
+verifyDocs_tmp=$(mktemp)
+trap "rm -f $verifyDocs_tmp" EXIT
+
cp "$target" "${verify_tmp}"
+cp "$targetDocs" "${verifyDocs_tmp}"
hack/update-crd-groups.sh
@@ -36,3 +41,12 @@ if [[ -n "${diff}" ]]; then
echo "Run ./hack/update-crd-groups.sh" >&2
exit 1
fi
+
+echo "diffing $targetDocs with $verifyDocs_tmp" >&2
+diff=$(diff "$targetDocs" "$verifyDocs_tmp") || true
+if [[ -n "${diff}" ]]; then
+ echo "${diff}" >&2
+ echo >&2
+ echo "Run ./hack/update-crd-groups.sh" >&2
+ exit 1
+fi
diff --git a/images/backup-manager/Dockerfile b/images/backup-manager/Dockerfile
deleted file mode 100644
index 32f0940baa..0000000000
--- a/images/backup-manager/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM pingcap/tidb-enterprise-tools:latest
-
-ARG VERSION=v1.48.0
-RUN apk update && apk add ca-certificates
-
-RUN wget -nv https://github.com/ncw/rclone/releases/download/${VERSION}/rclone-${VERSION}-linux-amd64.zip \
- && unzip rclone-${VERSION}-linux-amd64.zip \
- && mv rclone-${VERSION}-linux-amd64/rclone /usr/local/bin \
- && chmod 755 /usr/local/bin/rclone \
- && rm -rf rclone-${VERSION}-linux-amd64.zip rclone-${VERSION}-linux-amd64
-
-COPY bin/tidb-backup-manager /tidb-backup-manager
-COPY entrypoint.sh /entrypoint.sh
-
-ENTRYPOINT ["/entrypoint.sh"]
diff --git a/images/tidb-backup-manager/Dockerfile b/images/tidb-backup-manager/Dockerfile
new file mode 100644
index 0000000000..6b29976a56
--- /dev/null
+++ b/images/tidb-backup-manager/Dockerfile
@@ -0,0 +1,48 @@
+FROM pingcap/tidb-enterprise-tools:latest
+ARG VERSION=v1.51.0
+ARG SHUSH_VERSION=v1.4.0
+ARG TOOLKIT_VERSION=v3.0.12
+ARG TOOLKIT_V31=v3.1.0-rc
+ARG TOOLKIT_V40=v4.0.0-rc
+RUN apk update && apk add ca-certificates
+
+RUN wget -nv https://github.com/ncw/rclone/releases/download/${VERSION}/rclone-${VERSION}-linux-amd64.zip \
+ && unzip rclone-${VERSION}-linux-amd64.zip \
+ && mv rclone-${VERSION}-linux-amd64/rclone /usr/local/bin \
+ && chmod 755 /usr/local/bin/rclone \
+ && rm -rf rclone-${VERSION}-linux-amd64.zip rclone-${VERSION}-linux-amd64
+
+RUN wget -nv https://github.com/realestate-com-au/shush/releases/download/${SHUSH_VERSION}/shush_linux_amd64 \
+ && mv shush_linux_amd64 /usr/local/bin/shush \
+ && chmod 755 /usr/local/bin/shush
+
+RUN \
+ wget -nv https://download.pingcap.org/tidb-toolkit-${TOOLKIT_VERSION}-linux-amd64.tar.gz \
+ && tar -xzf tidb-toolkit-${TOOLKIT_VERSION}-linux-amd64.tar.gz \
+ && mv tidb-toolkit-${TOOLKIT_VERSION}-linux-amd64/bin/tidb-lightning /tidb-lightning \
+ && mv tidb-toolkit-${TOOLKIT_VERSION}-linux-amd64/bin/tidb-lightning-ctl /tidb-lightning-ctl \
+ && chmod 755 /tidb-lightning /tidb-lightning-ctl \
+ && rm -rf tidb-toolkit-${TOOLKIT_VERSION}-linux-amd64.tar.gz \
+ && rm -rf tidb-toolkit-${TOOLKIT_VERSION}-linux-amd64
+
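+# BR from the v3.1 and v4.0 toolkits is installed side by side as br31 and br40.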
+RUN \
+ wget -nv https://download.pingcap.org/tidb-toolkit-${TOOLKIT_V31}-linux-amd64.tar.gz \
+ && tar -xzf tidb-toolkit-${TOOLKIT_V31}-linux-amd64.tar.gz \
+ && mv tidb-toolkit-${TOOLKIT_V31}-linux-amd64/bin/br /usr/local/bin/br31 \
+ && chmod 755 /usr/local/bin/br31 \
+ && rm -rf tidb-toolkit-${TOOLKIT_V31}-linux-amd64.tar.gz \
+ && rm -rf tidb-toolkit-${TOOLKIT_V31}-linux-amd64
+
+RUN \
+ wget -nv https://download.pingcap.org/tidb-toolkit-${TOOLKIT_V40}-linux-amd64.tar.gz \
+ && tar -xzf tidb-toolkit-${TOOLKIT_V40}-linux-amd64.tar.gz \
+ && mv tidb-toolkit-${TOOLKIT_V40}-linux-amd64/bin/br /usr/local/bin/br40 \
+ && chmod 755 /usr/local/bin/br40 \
+ && rm -rf tidb-toolkit-${TOOLKIT_V40}-linux-amd64.tar.gz \
+ && rm -rf tidb-toolkit-${TOOLKIT_V40}-linux-amd64
+
+COPY bin/tidb-backup-manager /tidb-backup-manager
+COPY entrypoint.sh /entrypoint.sh
+
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/images/backup-manager/entrypoint.sh b/images/tidb-backup-manager/entrypoint.sh
similarity index 83%
rename from images/backup-manager/entrypoint.sh
rename to images/tidb-backup-manager/entrypoint.sh
index 85c889147d..fc11dc02f2 100755
--- a/images/backup-manager/entrypoint.sh
+++ b/images/tidb-backup-manager/entrypoint.sh
@@ -19,7 +19,7 @@ echo "Create rclone.conf file."
cat <<EOF > /tmp/rclone.conf
[s3]
type = s3
-env_auth = false
+env_auth = true
provider = ${S3_PROVIDER}
access_key_id = ${AWS_ACCESS_KEY_ID}
secret_access_key = ${AWS_SECRET_ACCESS_KEY:-$AWS_SECRET_KEY}
@@ -51,33 +51,40 @@ else
fi
BACKUP_BIN=/tidb-backup-manager
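+# Run the backup manager directly when AWS_DEFAULT_REGION is set; otherwise wrap
+# it with shush so KMS-encrypted environment variables are decrypted first.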
+if [[ -n "${AWS_DEFAULT_REGION}"]]; then
+ EXEC_COMMAND="exec"
+else
+ EXEC_COMMAND="/usr/local/bin/shush exec --"
+fi
+
+cat /tmp/rclone.conf
# exec command
case "$1" in
backup)
shift 1
echo "$BACKUP_BIN backup $@"
- exec $BACKUP_BIN backup "$@"
+ $EXEC_COMMAND $BACKUP_BIN backup "$@"
;;
export)
shift 1
echo "$BACKUP_BIN export $@"
- exec $BACKUP_BIN export "$@"
+ $EXEC_COMMAND $BACKUP_BIN export "$@"
;;
restore)
shift 1
echo "$BACKUP_BIN restore $@"
- exec $BACKUP_BIN restore "$@"
+ $EXEC_COMMAND $BACKUP_BIN restore "$@"
;;
import)
shift 1
echo "$BACKUP_BIN import $@"
- exec $BACKUP_BIN import "$@"
+ $EXEC_COMMAND $BACKUP_BIN import "$@"
;;
clean)
shift 1
echo "$BACKUP_BIN clean $@"
- exec $BACKUP_BIN clean "$@"
+ $EXEC_COMMAND $BACKUP_BIN clean "$@"
;;
*)
echo "Usage: $0 {backup|restore|clean}"
diff --git a/manifests/backup/backup-aws-s3-br.yaml b/manifests/backup/backup-aws-s3-br.yaml
new file mode 100644
index 0000000000..a750e660c1
--- /dev/null
+++ b/manifests/backup/backup-aws-s3-br.yaml
@@ -0,0 +1,35 @@
+---
+apiVersion: pingcap.com/v1alpha1
+kind: Backup
+metadata:
+ name: demo1-backup-s3
+ namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
+spec:
+ # backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
+ br:
+ cluster: myCluster
 # clusterNamespace:
+ # logLevel: info
+ # statusAddr:
+ # concurrency: 4
+ # rateLimit: 0
+ # timeAgo:
+ # checksum: true
+ # sendCredToTikv: true
+ from:
+ host: 172.30.6.56
+ secretName: mySecret
+ # port: 4000
+ # user: root
+ # tlsClient:
+ # tlsSecret:
+ s3:
+ provider: aws
+ region: us-west-2
+ bucket: backup
+ prefix: test1-demo1
+ # secretName: aws-secret
diff --git a/manifests/backup/backup-s3-br.yaml b/manifests/backup/backup-s3-br.yaml
index d6a7bbbf60..95fa298419 100644
--- a/manifests/backup/backup-s3-br.yaml
+++ b/manifests/backup/backup-s3-br.yaml
@@ -4,13 +4,15 @@ kind: Backup
metadata:
name: demo1-backup-s3
namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
spec:
- #backupType: full
+ # backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
br:
- pd: 10.233.40.168:2379
- # ca:
- # cert:
- # key:
+ cluster: myCluster
+ # clusterNamespace:
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -18,6 +20,13 @@ spec:
# timeAgo:
# checksum: true
# sendCredToTikv: true
+ from:
+ host: 172.30.6.56
+ secretName: mySecret
+ # port: 4000
+ # user: root
+ # tlsClient:
+ # tlsSecret:
s3:
provider: ceph
endpoint: http://10.233.57.220
diff --git a/manifests/backup/backup-schedule-aws-s3-br.yaml b/manifests/backup/backup-schedule-aws-s3-br.yaml
new file mode 100644
index 0000000000..978cc9b55d
--- /dev/null
+++ b/manifests/backup/backup-schedule-aws-s3-br.yaml
@@ -0,0 +1,40 @@
+---
+apiVersion: pingcap.com/v1alpha1
+kind: BackupSchedule
+metadata:
+ name: demo1-backup-schedule-s3
+ namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
+spec:
+ #maxBackups: 5
+ #pause: true
+ maxReservedTime: "3h"
+ schedule: "*/2 * * * *"
+ backupTemplate:
+ #backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
+ br:
+ cluster: myCluster
+ # clusterNamespace: backupNamespace
+ # logLevel: info
+ # statusAddr:
+ # concurrency: 4
+ # rateLimit: 0
+ # timeAgo:
+ # checksum: true
+ # sendCredToTikv: true
+ from:
+ host: 172.30.6.56
+ secretName: mysecret
+ # port: 4000
+ # user: root
+ # tlsClient:
+ # tlsSecret:
+ s3:
+ provider: aws
+ region: us-west-2
+ bucket: backup
+ prefix: test1-demo1
+ # secretName: aws-secret
diff --git a/manifests/backup/backup-schedule-s3-br.yaml b/manifests/backup/backup-schedule-s3-br.yaml
index 622a7680ae..6880f686fe 100644
--- a/manifests/backup/backup-schedule-s3-br.yaml
+++ b/manifests/backup/backup-schedule-s3-br.yaml
@@ -4,6 +4,8 @@ kind: BackupSchedule
metadata:
name: demo1-backup-schedule-s3
namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
spec:
#maxBackups: 5
#pause: true
@@ -11,11 +13,11 @@ spec:
schedule: "*/2 * * * *"
backupTemplate:
#backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
br:
- pd: 10.233.40.168:2379
- # ca:
- # cert:
- # key:
+ cluster: myCluster
+ # clusterNamespace: backupNamespace
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -23,6 +25,13 @@ spec:
# timeAgo:
# checksum: true
# sendCredToTikv: true
+ from:
+ host: 172.30.6.56
+ secretName: mysecret
+ # port: 4000
+ # user: root
+ # tlsClient:
+ # tlsSecret:
s3:
provider: ceph
endpoint: http://10.233.57.220
diff --git a/manifests/backup/restore-aws-s3-br.yaml b/manifests/backup/restore-aws-s3-br.yaml
new file mode 100644
index 0000000000..cc3880798c
--- /dev/null
+++ b/manifests/backup/restore-aws-s3-br.yaml
@@ -0,0 +1,37 @@
+---
+apiVersion: pingcap.com/v1alpha1
+kind: Restore
+metadata:
+ name: demo1-restore-s3-br
+ namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
+spec:
+ # backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
+ br:
+ cluster: myCluster
+ # clusterNamespace:
+ # db:
+ # table:
+ # logLevel: info
+ # statusAddr:
+ # concurrency: 4
+ # rateLimit: 0
+ # timeAgo:
+ # checksum: true
+ # sendCredToTikv: true
+ to:
+ host: 172.30.6.56
+ secretName: mySecret
+ # port: 4000
+ # user: root
+ # tlsClient:
+ # tlsSecret:
+ s3:
+ provider: aws
+ region: us-west-2
+ bucket: backup
+ prefix: test1-demo1
+ # secretName: aws-secret
diff --git a/manifests/backup/restore-s3-br.yaml b/manifests/backup/restore-s3-br.yaml
index b0d03f1718..047bcd8d64 100644
--- a/manifests/backup/restore-s3-br.yaml
+++ b/manifests/backup/restore-s3-br.yaml
@@ -4,15 +4,17 @@ kind: Restore
metadata:
name: demo1-restore-s3-br
namespace: test1
+ # annotations:
+ # iam.amazonaws.com/role: "arn:aws:iam::123456789:role"
spec:
# backupType: full
+ # useKMS: false
+ # serviceAccount: myServiceAccount
br:
- pd: 10.233.40.168:2379
+ cluster: myCluster
+    # clusterNamespace:
# db:
# table:
- # ca:
- # cert:
- # key:
# logLevel: info
# statusAddr:
# concurrency: 4
@@ -20,6 +22,13 @@ spec:
# timeAgo:
# checksum: true
# sendCredToTikv: true
+ to:
+ host: 172.30.6.56
+ secretName: mySecret
+ # port: 4000
+ # user: root
+ # tlsClient:
+ # tlsSecret:
s3:
provider: ceph
endpoint: http://10.233.57.220
diff --git a/manifests/crd.yaml b/manifests/crd.yaml
index 19c8afae32..9201b43b04 100644
--- a/manifests/crd.yaml
+++ b/manifests/crd.yaml
@@ -6,7 +6,7 @@ metadata:
name: tidbclusters.pingcap.com
spec:
additionalPrinterColumns:
- - JSONPath: .spec.pd.image
+ - JSONPath: .status.pd.image
description: The image for PD cluster
name: PD
type: string
@@ -22,7 +22,7 @@ spec:
description: The desired replicas number of PD cluster
name: Desire
type: integer
- - JSONPath: .spec.tikv.image
+ - JSONPath: .status.tikv.image
description: The image for TiKV cluster
name: TiKV
type: string
@@ -38,7 +38,7 @@ spec:
description: The desired replicas number of TiKV cluster
name: Desire
type: integer
- - JSONPath: .spec.tidb.image
+ - JSONPath: .status.tidb.image
description: The image for TiDB cluster
name: TiDB
type: string
@@ -50,6 +50,9 @@ spec:
description: The desired replicas number of TiDB cluster
name: Desire
type: integer
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
group: pingcap.com
names:
kind: TidbCluster
@@ -675,10 +678,6 @@ spec:
description: 'Whether enable PVC reclaim for orphan PVC left by statefulset
scale-in Optional: Defaults to false'
type: boolean
- enableTLSCluster:
- description: 'Enable TLS connection between TiDB server components Optional:
- Defaults to false'
- type: boolean
helper:
description: HelperSpec contains details of helper component
properties:
@@ -704,6 +703,10 @@ spec:
description: Base node selectors of TiDB cluster Pods, components may
add or override selectors upon this respectively
type: object
+ paused:
+ description: Indicates that the tidb cluster is paused and will not
+ be processed by the controller.
+ type: boolean
pd:
description: PDSpec contains details of PD members
properties:
@@ -1344,6 +1347,7 @@ spec:
type: string
cluster-version:
type: string
+ dashboard: {}
election-interval:
description: ElectionInterval is the interval for etcd Raft
election.
@@ -1447,6 +1451,11 @@ spec:
pd-server:
description: PDServerConfig is the configuration for pd server.
properties:
+ metric-storage:
+ description: MetricStorage is the cluster metric storage.
+ Currently we use prometheus as metric storage, we may
+ use PD/TiKV as metric storage later. Imported from v3.1.0
+ type: string
use-region-storage:
description: UseRegionStorage enables the independent region
storage.
@@ -1460,6 +1469,10 @@ spec:
replication:
description: PDReplicationConfig is the replication configuration.
properties:
+ enable-placement-rules:
+                      description: When the PlacementRules feature is enabled, MaxReplicas
+ and LocationLabels are not used anymore.
+ type: string
max-replicas:
description: 'MaxReplicas is the number of replicas for
each region. Immutable, change should be made through
@@ -1469,7 +1482,8 @@ spec:
strictly-match-label:
description: StrictlyMatchLabel strictly checks if the label
of TiKV is matched with LocaltionLabels. Immutable, change
- should be made through pd-ctl after cluster creation
+ should be made through pd-ctl after cluster creation.
+ Imported from v3.1.0
type: string
type: object
schedule:
@@ -1515,6 +1529,17 @@ spec:
Immutable, change should be made through pd-ctl after
cluster creation
type: string
+ enable-cross-table-merge:
+ description: EnableCrossTableMerge is the option to enable
+                        cross-table merge. This means two Regions with different
+                        table IDs can be merged. This option only works when
+ key type is "table". Imported from v3.1.0
+ type: string
+ enable-one-way-merge:
+ description: EnableOneWayMerge is the option to enable one
+ way merge. This means a Region can only be merged into
+                        its next Region. Imported from v3.1.0
+ type: string
high-space-ratio:
description: HighSpaceRatio is the highest usage ratio of
store which regraded as high space. High space means there
@@ -1541,7 +1566,8 @@ spec:
leader-schedule-limit:
description: 'LeaderScheduleLimit is the max coexist leader
schedules. Immutable, change should be made through pd-ctl
- after cluster creation Optional: Defaults to 4'
+ after cluster creation. Optional: Defaults to 4. Imported
+ from v3.1.0'
format: int64
type: integer
low-space-ratio:
@@ -1611,6 +1637,9 @@ spec:
after cluster creation Optional: Defaults to 64'
format: int64
type: integer
+ schedulers-payload:
+                      description: Only used for display
+ type: object
schedulers-v2:
description: Schedulers support for loding customized schedulers
Immutable, change should be made through pd-ctl after
@@ -1644,7 +1673,7 @@ spec:
tolerant-size-ratio:
description: TolerantSizeRatio is the ratio of buffer size
for balance scheduler. Immutable, change should be made
- through pd-ctl after cluster creation
+ through pd-ctl after cluster creation. Imported from v3.1.0
format: double
type: number
type: object
@@ -1679,6 +1708,102 @@ spec:
cluster-level updateStrategy if present Optional: Defaults to
cluster-level setting'
type: string
+ env:
+ description: List of environment variables to set in the container,
+                like v1.Container.Env. Note that the following env names cannot be
+                used and may be overridden by tidb-operator built-in envs. - NAMESPACE
+ - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME
+ - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED
+ - SLOW_LOG_FILE
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previous defined environment variables in the
+ container and any service environment variables. If a variable
+ cannot be resolved, the reference in the input string will
+ be unchanged. The $(VAR_NAME) syntax can be escaped with
+ a double $$, ie: $$(VAR_NAME). Escaped references will never
+ be expanded, regardless of whether the variable exists or
+ not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: EnvVarSource represents a source for the value
+ of an EnvVar.
+ properties:
+ configMapKeyRef:
+ description: Selects a key from a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: ObjectFieldSelector selects an APIVersioned
+ field of an object.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: ResourceFieldSelector represents container
+ resources (cpu, memory) and their output format
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor: {}
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
hostNetwork:
description: 'Whether Hostnetwork of the component is enabled. Override
the cluster-level setting if present Optional: Defaults to cluster-level
@@ -1693,6 +1818,11 @@ spec:
description: 'Limits describes the maximum amount of compute resources
allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
type: object
+ maxFailoverCount:
+ description: 'MaxFailoverCount limit the max replicas could be added
+ in failover, 0 means no failover. Optional: Defaults to 3'
+ format: int32
+ type: integer
nodeSelector:
description: 'NodeSelector of the component. Merged into the cluster-level
nodeSelector if non-empty Optional: Defaults to cluster-level
@@ -2537,6 +2667,102 @@ spec:
cluster-level updateStrategy if present Optional: Defaults to
cluster-level setting'
type: string
+ env:
+ description: List of environment variables to set in the container,
+                like v1.Container.Env. Note that the following env names cannot be
+                used and may be overridden by tidb-operator built-in envs. - NAMESPACE
+ - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME
+ - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED
+ - SLOW_LOG_FILE
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previous defined environment variables in the
+ container and any service environment variables. If a variable
+ cannot be resolved, the reference in the input string will
+ be unchanged. The $(VAR_NAME) syntax can be escaped with
+ a double $$, ie: $$(VAR_NAME). Escaped references will never
+ be expanded, regardless of whether the variable exists or
+ not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: EnvVarSource represents a source for the value
+ of an EnvVar.
+ properties:
+ configMapKeyRef:
+ description: Selects a key from a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: ObjectFieldSelector selects an APIVersioned
+ field of an object.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: ResourceFieldSelector represents container
+ resources (cpu, memory) and their output format
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor: {}
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
hostNetwork:
description: 'Whether Hostnetwork of the component is enabled. Override
the cluster-level setting if present Optional: Defaults to cluster-level
@@ -3377,7 +3603,8 @@ spec:
to true if PumpSpec is non-nil, otherwise false'
type: boolean
config:
- description: TiDBConfig is the configuration of tidb-server
+ description: TiDBConfig is the configuration of tidb-server For
+ more detail, refer to https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration/
properties:
alter-primary-key:
description: 'Optional: Defaults to false'
@@ -3389,6 +3616,9 @@ spec:
description: Use socket file to write binlog, for compatible
with kafka version tidb-binlog.
type: string
+ enable:
+ description: optional
+ type: boolean
ignore-error:
description: If IgnoreError is true, when writing binlog
meets error, TiDB would ignore the error.
@@ -3409,12 +3639,45 @@ spec:
type: boolean
cors:
type: string
+ delay-clean-table-lock:
+ description: imported from v3.1.0 optional
+ format: int64
+ type: integer
enable-batch-dml:
description: 'Optional: Defaults to false'
type: boolean
+ enable-dynamic-config:
+                description: EnableDynamicConfig enables TiDB to fetch configs
+                  from PD and update itself during runtime. See https://github.com/pingcap/tidb/pull/13660
+ for more details.
+ type: boolean
enable-streaming:
description: 'Optional: Defaults to false'
type: boolean
+ enable-table-lock:
+ description: imported from v3.1.0 optional
+ type: boolean
+ experimental:
+ description: 'Experimental controls the features that are still
+ experimental: their semantics, interfaces are subject to change.
+ Using these features in the production environment is not
+ recommended.'
+ properties:
+ allow-auto-random:
+ description: Whether enable the syntax like `auto_random(3)`
+ on the primary key column. imported from TiDB v3.1.0
+ type: boolean
+ type: object
+ isolation-read:
+ description: IsolationRead is the config for isolation read.
+ properties:
+ engines:
+ description: Engines filters tidb-server access paths by
+ engine type. imported from v3.1.0
+ items:
+ type: string
+ type: array
+ type: object
lease:
description: 'Optional: Defaults to 45s'
type: string
@@ -3424,6 +3687,16 @@ spec:
disable-timestamp:
description: Disable automatic timestamps in output.
type: boolean
+ enable-error-stack:
+ description: EnableErrorStack enables annotating logs with
+ the full stack error message.
+ type: boolean
+ enable-slow-log:
+ type: boolean
+ enable-timestamp:
+ description: EnableTimestamp enables automatic timestamps
+ in log output.
+ type: boolean
expensive-threshold:
description: 'Optional: Defaults to 10000'
format: int32
@@ -3465,6 +3738,8 @@ spec:
description: 'Optional: Defaults to 1'
format: int64
type: integer
+ slow-query-file:
+ type: string
slow-threshold:
description: 'Optional: Defaults to 300'
format: int64
@@ -3473,10 +3748,20 @@ spec:
lower-case-table-names:
format: int32
type: integer
+ max-server-connections:
+ description: MaxServerConnections is the maximum permitted number
+ of simultaneous client connections.
+ format: int64
+ type: integer
mem-quota-query:
description: 'Optional: Defaults to 34359738368'
format: int64
type: integer
+ new_collations_enabled_on_first_bootstrap:
+ description: NewCollationsEnabledOnFirstBootstrap indicates
+                  whether the new collations are enabled. It takes effect only when
+                  a TiDB cluster is bootstrapped for the first time.
+ type: boolean
oom-action:
description: 'Optional: Defaults to log'
type: string
@@ -3628,6 +3913,14 @@ spec:
networks.
type: string
type: object
+ repair-mode:
+ description: RepairMode indicates that the TiDB is in the repair
+ mode for table meta.
+ type: boolean
+ repair-table-list:
+ items:
+ type: string
+ type: array
run-ddl:
description: 'Optional: Defaults to true'
type: boolean
@@ -3677,6 +3970,13 @@ spec:
stmt-summary:
description: StmtSummary is the config for statement summary.
properties:
+ enable:
+ description: Enable statement summary or not.
+ type: boolean
+ history-size:
+ description: The maximum history size of statement summary.
+ format: int32
+ type: integer
max-sql-length:
description: 'The maximum length of displayed normalized
SQL and sample SQL. Optional: Defaults to 4096'
@@ -3687,6 +3987,10 @@ spec:
Optional: Defaults to 100'
format: int32
type: integer
+ refresh-interval:
+ description: The refresh interval of statement summary.
+ format: int32
+ type: integer
type: object
tikv-client:
description: TiKVClient is the config for tikv client.
@@ -3700,6 +4004,7 @@ spec:
description: 'CommitTimeout is the max time which command
''commit'' will wait. Optional: Defaults to 41s'
type: string
+ copr-cache: {}
grpc-connection-count:
description: 'GrpcConnectionCount is the max gRPC connections
that will be established with each tikv-server. Optional:
@@ -3779,10 +4084,102 @@ spec:
cluster-level updateStrategy if present Optional: Defaults to
cluster-level setting'
type: string
- enableTLSClient:
- description: 'Whether enable the TLS connection between the SQL
- client and TiDB server Optional: Defaults to false'
- type: boolean
+ env:
+ description: List of environment variables to set in the container,
+                like v1.Container.Env. Note that the following env names cannot be
+                used and may be overridden by tidb-operator built-in envs. - NAMESPACE
+ - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME
+ - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED
+ - SLOW_LOG_FILE
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previous defined environment variables in the
+ container and any service environment variables. If a variable
+ cannot be resolved, the reference in the input string will
+ be unchanged. The $(VAR_NAME) syntax can be escaped with
+ a double $$, ie: $$(VAR_NAME). Escaped references will never
+ be expanded, regardless of whether the variable exists or
+ not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: EnvVarSource represents a source for the value
+ of an EnvVar.
+ properties:
+ configMapKeyRef:
+ description: Selects a key from a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: ObjectFieldSelector selects an APIVersioned
+ field of an object.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: ResourceFieldSelector represents container
+ resources (cpu, memory) and their output format
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor: {}
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
hostNetwork:
description: 'Whether Hostnetwork of the component is enabled. Override
the cluster-level setting if present Optional: Defaults to cluster-level
@@ -3799,7 +4196,7 @@ spec:
type: object
maxFailoverCount:
description: 'MaxFailoverCount limit the max replicas could be added
- in failover, 0 means unlimited Optional: Defaults to 0'
+                in failover, 0 means no failover. Optional: Defaults to 3'
format: int32
type: integer
nodeSelector:
@@ -3977,6 +4374,7 @@ spec:
to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
type: object
type: object
+ tlsClient: {}
tolerations:
description: 'Tolerations of the component. Override the cluster-level
tolerations if non-empty Optional: Defaults to cluster-level setting'
@@ -4025,8 +4423,8 @@ spec:
required:
- replicas
type: object
- tikv:
- description: TiKVSpec contains details of TiKV members
+ tiflash:
+ description: TiFlashSpec contains details of TiFlash members
properties:
affinity:
description: Affinity is a group of affinity scheduling rules.
@@ -4648,1412 +5046,3515 @@ spec:
during validation
type: string
config:
- description: TiKVConfig is the configuration of TiKV.
+ description: TiFlashConfig is the configuration of TiFlash.
properties:
- coprocessor:
- description: TiKVCoprocessorConfig is the configuration of TiKV
- Coprocessor component.
+ config:
+ description: CommonConfig is the configuration of TiFlash process.
properties:
- batch-split-limit:
- description: One split check produces several split keys
- in batch. This config limits the number of produced split
- keys in one batch. optional
- format: int64
- type: integer
- region-max-keys:
- description: 'When the number of keys in Region [a,e) exceeds
- the `region_max_keys`, it will be split into several Regions
- [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b),
- [b,c), [c,d) will be `region_split_keys`. See also: region-split-keys
- Optional: Defaults to 1440000 optional'
+ flash:
+ description: Flash is the configuration of [flash] section.
+ properties:
+ compact_log_min_period:
+ description: 'Optional: Defaults to 200'
+ format: int32
+ type: integer
+ flash_cluster:
+ description: FlashCluster is the configuration of [flash.flash_cluster]
+ section.
+ properties:
+ master_ttl:
+ description: 'Optional: Defaults to 60'
+ format: int32
+ type: integer
+ refresh_interval:
+ description: 'Optional: Defaults to 20'
+ format: int32
+ type: integer
+ update_rule_interval:
+ description: 'Optional: Defaults to 10'
+ format: int32
+ type: integer
+ type: object
+ overlap_threshold:
+ description: 'Optional: Defaults to 0.6'
+ format: double
+ type: number
+ type: object
+ loger:
+ description: FlashLogger is the configuration of [logger]
+ section.
+ properties:
+ count:
+ description: 'Optional: Defaults to 10'
+ format: int32
+ type: integer
+ level:
+ description: 'Optional: Defaults to information'
+ type: string
+ size:
+ description: 'Optional: Defaults to 100M'
+ type: string
+ type: object
+ mark_cache_size:
+ description: 'Optional: Defaults to 5368709120'
format: int64
type: integer
- region-max-size:
- description: 'When Region [a,e) size exceeds `region_max_size`,
- it will be split into several Regions [a,b), [b,c), [c,d),
- [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size`
- (or a little larger). See also: region-split-size Optional:
- Defaults to 144MB optional'
- type: string
- region-split-keys:
- description: 'When the number of keys in Region [a,e) exceeds
- the `region_max_keys`, it will be split into several Regions
- [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b),
- [b,c), [c,d) will be `region_split_keys`. See also: region-max-keys
- Optional: Defaults to 960000 optional'
+ minmax_index_cache_size:
+ description: 'Optional: Defaults to 5368709120'
format: int64
type: integer
- region-split-size:
- description: 'When Region [a,e) size exceeds `region_max_size`,
- it will be split into several Regions [a,b), [b,c), [c,d),
- [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size`
- (or a little larger). See also: region-max-size Optional:
- Defaults to 96MB optional'
- type: string
- split-region-on-table:
- description: 'When it is set to `true`, TiKV will try to
- split a Region with table prefix if that Region crosses
- tables. It is recommended to turn off this option if there
- will be a large number of tables created. Optional: Defaults
- to false optional'
+ path_realtime_mode:
+ description: 'Optional: Defaults to false'
type: boolean
type: object
- gc:
- properties:
- "\tbatch_keys":
- description: 'Optional: Defaults to 512'
- format: int64
- type: integer
- "\tmax_write_bytes_per_sec":
- type: string
+ type: object
+ configUpdateStrategy:
+ description: 'ConfigUpdateStrategy of the component. Override the
+ cluster-level updateStrategy if present Optional: Defaults to
+ cluster-level setting'
+ type: string
+ env:
+ description: List of environment variables to set in the container,
+                like v1.Container.Env. Note that the following env names cannot be
+                used and may be overridden by tidb-operator built-in envs. - NAMESPACE
+ - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME
+ - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED
+ - SLOW_LOG_FILE
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previous defined environment variables in the
+ container and any service environment variables. If a variable
+ cannot be resolved, the reference in the input string will
+ be unchanged. The $(VAR_NAME) syntax can be escaped with
+ a double $$, ie: $$(VAR_NAME). Escaped references will never
+ be expanded, regardless of whether the variable exists or
+ not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: EnvVarSource represents a source for the value
+ of an EnvVar.
+ properties:
+ configMapKeyRef:
+ description: Selects a key from a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: ObjectFieldSelector selects an APIVersioned
+ field of an object.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: ResourceFieldSelector represents container
+ resources (cpu, memory) and their output format
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor: {}
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ hostNetwork:
+ description: 'Whether Hostnetwork of the component is enabled. Override
+ the cluster-level setting if present Optional: Defaults to cluster-level
+ setting'
+ type: boolean
+ imagePullPolicy:
+ description: 'ImagePullPolicy of the component. Override the cluster-level
+ imagePullPolicy if present Optional: Defaults to cluster-level
+ setting'
+ type: string
+ limits:
+ description: 'Limits describes the maximum amount of compute resources
+ allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ logTailer:
+ description: LogTailerSpec represents an optional log tailer sidecar
+ container
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
type: object
- import:
- properties:
- import_dir:
- type: string
- max_open_engines:
- format: int64
- type: integer
- max_prepare_duration:
- type: string
- num_import_jobs:
- format: int64
- type: integer
- num_import_sst_jobs:
- format: int64
- type: integer
- num_threads:
- format: int64
- type: integer
- region_split_size:
- type: string
- stream_channel_window:
- format: int64
- type: integer
- upload_speed_limit:
- type: string
+ requests:
+ description: 'Requests describes the minimum amount of compute
+ resources required. If Requests is omitted for a container,
+ it defaults to Limits if that is explicitly specified, otherwise
+ to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
type: object
- log-file:
- type: string
- log-level:
- description: 'Optional: Defaults to info'
- type: string
- log-rotation-timespan:
- description: 'Optional: Defaults to 24h'
- type: string
- panic-when-unexpected-key-or-data:
- type: boolean
- pd:
- properties:
- endpoints:
- description: |-
- The PD endpoints for the client.
-
- Default is empty.
- items:
- type: string
- type: array
- retry_interval:
- description: |-
- The interval at which to retry a PD connection initialization.
-
- Default is 300ms. Optional: Defaults to 300ms
- type: string
- retry_log_every:
- description: |-
- If the client observes the same error message on retry, it can repeat the message only every `n` times.
+ type: object
+ maxFailoverCount:
+ description: 'MaxFailoverCount limit the max replicas could be added
+                  in failover, 0 means no failover. Optional: Defaults to 3'
+ format: int32
+ type: integer
+ nodeSelector:
+ description: 'NodeSelector of the component. Merged into the cluster-level
+ nodeSelector if non-empty Optional: Defaults to cluster-level
+ setting'
+ type: object
+ podSecurityContext:
+ description: PodSecurityContext holds pod-level security attributes
+ and common container settings. Some fields are also present in
+ container.securityContext. Field values of container.securityContext
+ take precedence over field values of PodSecurityContext.
+ properties:
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
- Default is 10. Set to 1 to disable this feature. Optional: Defaults to 10
- format: int64
- type: integer
- retry_max_count:
- description: |-
- The maximum number of times to retry a PD connection initialization.
+ 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
- Default is isize::MAX, represented by -1. Optional: Defaults to -1
- format: int64
- type: integer
- type: object
- raftdb:
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ format: int64
+ type: integer
+ runAsGroup:
+ description: The GID to run the entrypoint of the container
+ process. Uses runtime default if unset. May also be set in
+ SecurityContext. If set in both SecurityContext and PodSecurityContext,
+ the value specified in SecurityContext takes precedence for
+ that container.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root
+ user. If true, the Kubelet will validate the image at runtime
+ to ensure that it does not run as UID 0 (root) and fail to
+ start the container if it does. If unset or false, no such
+ validation will be performed. May also be set in SecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the value
+ specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container
+ process. Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext
+ and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: SELinuxOptions are the labels to be applied to
+ the container
properties:
- allow_concurrent_memtable_write:
- type: boolean
- bytes_per_sync:
- type: string
- compaction_readahead_size:
- type: string
- create_if_missing:
- type: boolean
- defaultcf:
- description: TiKVCfConfig is the config of a cf
- properties:
- block-based-bloom-filter:
- type: boolean
- block-cache-size:
- type: string
- block-size:
- type: string
- bloom-filter-bits-per-key:
- format: int64
- type: integer
- cache-index-and-filter-blocks:
- type: boolean
- compaction-pri:
- format: int64
- type: integer
- compaction-style:
- format: int64
- type: integer
- compression-per-level:
- items:
- type: string
- type: array
- disable-auto-compactions:
- type: boolean
- disable-block-cache:
- type: boolean
- dynamic-level-bytes:
- type: boolean
- enable-doubly-skiplist:
- type: boolean
- force-consistency-checks:
- type: boolean
- hard-pending-compaction-bytes-limit:
- type: string
- level0-file-num-compaction-trigger:
- format: int64
- type: integer
- level0-slowdown-writes-trigger:
- format: int64
- type: integer
- level0-stop-writes-trigger:
- format: int64
- type: integer
- max-bytes-for-level-base:
- type: string
- max-bytes-for-level-multiplier:
- format: int64
- type: integer
- max-compaction-bytes:
- type: string
- max-write-buffer-number:
- format: int64
- type: integer
- min-write-buffer-number-to-merge:
- format: int64
- type: integer
- num-levels:
- format: int64
- type: integer
- optimize-filters-for-hits:
- type: boolean
- pin-l0-filter-and-index-blocks:
- type: boolean
- prop-keys-index-distance:
- format: int64
- type: integer
- prop-size-index-distance:
- format: int64
- type: integer
- read-amp-bytes-per-bit:
- format: int64
- type: integer
- soft-pending-compaction-bytes-limit:
- type: string
- target-file-size-base:
- type: string
- titan:
- description: TiKVTitanCfConfig is the titian config.
- properties:
- blob-cache-size:
- type: string
- blob-file-compression:
- type: string
- blob-run-mode:
- type: string
- discardable-ratio:
- format: double
- type: number
- max-gc-batch-size:
- type: string
- merge-small-file-threshold:
- type: string
- min-blob-size:
- type: string
- min-gc-batch-size:
- type: string
- sample-ratio:
- format: double
- type: number
- type: object
- use-bloom-filter:
- type: boolean
- whole-key-filtering:
- type: boolean
- write-buffer-size:
- type: string
- type: object
- enable_pipelined_write:
- type: boolean
- enable_statistics:
- type: boolean
- info_log_dir:
- type: string
- info_log_keep_log_file_num:
- format: int64
- type: integer
- info_log_max_size:
- type: string
- info_log_roll_time:
- type: string
- max_background_jobs:
- format: int64
- type: integer
- max_manifest_file_size:
- type: string
- max_open_files:
- format: int64
- type: integer
- max_sub_compactions:
- format: int64
- type: integer
- max_total_wal_size:
- type: string
- stats_dump_period:
- type: string
- use_direct_io_for_flush_and_compaction:
- type: boolean
- wal_bytes_per_sync:
- type: string
- wal_dir:
+ level:
+ description: Level is SELinux level label that applies to
+ the container.
type: string
- wal_recovery_mode:
+ role:
+ description: Role is a SELinux role label that applies to
+ the container.
type: string
- wal_size_limit:
+ type:
+ description: Type is a SELinux type label that applies to
+ the container.
type: string
- wal_ttl_seconds:
- format: int64
- type: integer
- writable_file_max_buffer_size:
+ user:
+ description: User is a SELinux user label that applies to
+ the container.
type: string
type: object
- raftstore:
- description: TiKVRaftstoreConfig is the configuration of TiKV
- raftstore component.
- properties:
- abnormal-leader-missing-duration:
- description: / Similar to the max-leader-missing-duration,
- instead it will log warnings and / try to alert monitoring
- systems, if there is any.
- type: string
- allow-remove-leader:
- type: boolean
- apply-max-batch-size:
- format: int64
- type: integer
- apply-pool-size:
- description: 'Optional: Defaults to 2'
- format: int64
- type: integer
- clean-stale-peer-delay:
- description: 'delay time before deleting a stale peer Optional:
- Defaults to 10m'
- type: string
- cleanup-import-sst-interval:
- description: 'Optional: Defaults to 10m'
- type: string
- consistency-check-interval:
- description: 'Interval (ms) to check region whether the
- data is consistent. Optional: Defaults to 0'
- type: string
- hibernate-regions:
- type: boolean
- leader-transfer-max-log-lag:
- format: int64
- type: integer
- lock-cf-compact-bytes-threshold:
- description: 'Optional: Defaults to 256MB'
- type: string
- lock-cf-compact-interval:
- description: 'Optional: Defaults to 10m'
- type: string
- max-leader-missing-duration:
- description: / If the leader of a peer is missing for longer
- than max-leader-missing-duration / the peer would ask
- pd to confirm whether it is valid in any region. / If
- the peer is stale and is not valid in any region, it will
- destroy itself.
- type: string
- max-peer-down-duration:
- description: '/ When a peer is not active for max-peer-down-duration
- / the peer is considered to be down and is reported to
- PD. Optional: Defaults to 5m'
- type: string
- merge-check-tick-interval:
- description: / Interval to re-propose merge.
- type: string
- merge-max-log-gap:
- description: / Max log gap allowed to propose merge.
- format: int64
- type: integer
- messages-per-tick:
- format: int64
- type: integer
- notify-capacity:
- format: int64
- type: integer
- pd-heartbeat-tick-interval:
- description: 'Optional: Defaults to 60s'
- type: string
- pd-store-heartbeat-tick-interval:
- description: 'Optional: Defaults to 10s'
- type: string
- peer-stale-state-check-interval:
- type: string
- prevote:
- description: 'Optional: Defaults to true'
- type: boolean
- raft-base-tick-interval:
- description: raft-base-tick-interval is a base tick interval
- (ms).
- type: string
- raft-election-timeout-ticks:
- format: int64
- type: integer
- raft-entry-cache-life-time:
- description: When a peer is not responding for this time,
- leader will not keep entry cache for it.
- type: string
- raft-entry-max-size:
- description: 'When the entry exceed the max size, reject
- to propose it. Optional: Defaults to 8MB'
- type: string
- raft-heartbeat-ticks:
- format: int64
- type: integer
- raft-log-gc-count-limit:
- description: 'When entry count exceed this value, gc will
- be forced trigger. Optional: Defaults to 72000'
- format: int64
- type: integer
- raft-log-gc-size-limit:
- description: 'When the approximate size of raft log entries
- exceed this value gc will be forced trigger. Optional:
- Defaults to 72MB'
- type: string
- raft-log-gc-threshold:
- description: 'A threshold to gc stale raft log, must >=
- 1. Optional: Defaults to 50'
- format: int64
- type: integer
- raft-log-gc-tick-interval:
- description: 'Interval to gc unnecessary raft log (ms).
- Optional: Defaults to 10s'
- type: string
- raft-reject-transfer-leader-duration:
- description: When a peer is newly added, reject transferring
- leader to the peer for a while.
- type: string
- raft-store-max-leader-lease:
- description: The lease provided by a successfully proposed
- and applied entry.
- type: string
- region-compact-check-interval:
- description: '/ Interval (ms) to check whether start compaction
- for a region. Optional: Defaults to 5m'
- type: string
- region-compact-check-step:
- description: '/ Number of regions for each time checking.
- Optional: Defaults to 100'
- format: int64
- type: integer
- region-compact-min-tombstones:
- description: '/ Minimum number of tombstones to trigger
- manual compaction. Optional: Defaults to 10000'
- format: int64
- type: integer
- region-compact-tombstones-percent:
- description: '/ Minimum percentage of tombstones to trigger
- manual compaction. / Should between 1 and 100. Optional:
- Defaults to 30'
- format: int64
- type: integer
- region-split-check-diff:
- description: '/ When size change of region exceed the diff
- since last check, it / will be checked again whether it
- should be split. Optional: Defaults to 6MB'
- type: string
- report-region-flow-interval:
- type: string
- right-derive-when-split:
- description: Right region derive origin region id when split.
- type: boolean
- snap-apply-batch-size:
- type: string
- snap-gc-timeout:
+ supplementalGroups:
+ description: A list of groups applied to the first process run
+ in each container, in addition to the container's primary
+ GID. If unspecified, no groups will be added to any container.
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ description: Sysctls hold a list of namespaced sysctls used
+ for the pod. Pods with unsupported sysctls (by the container
+ runtime) might fail to launch.
+ items:
+ description: Sysctl defines a kernel parameter to be set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ description: WindowsSecurityContextOptions contain Windows-specific
+ options and credentials.
+ properties:
+ gmsaCredentialSpec:
+ description: GMSACredentialSpec is where the GMSA admission
+ webhook (https://github.com/kubernetes-sigs/windows-gmsa)
+ inlines the contents of the GMSA credential spec named
+ by the GMSACredentialSpecName field. This field is alpha-level
+ and is only honored by servers that enable the WindowsGMSA
+ feature flag.
type: string
- snap-mgr-gc-tick-interval:
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the GMSA
+ credential spec to use. This field is alpha-level and
+ is only honored by servers that enable the WindowsGMSA
+ feature flag.
type: string
- split-region-check-tick-interval:
- description: 'Interval (ms) to check region whether need
- to be split or not. Optional: Defaults to 10s'
+ runAsUserName:
+ description: The UserName in Windows to run the entrypoint
+ of the container process. Defaults to the user specified
+ in image metadata if unspecified. May also be set in PodSecurityContext.
+ If set in both SecurityContext and PodSecurityContext,
+ the value specified in SecurityContext takes precedence.
+ This field is alpha-level and it is only honored by servers
+ that enable the WindowsRunAsUserName feature flag.
type: string
- store-max-batch-size:
- format: int64
- type: integer
- store-pool-size:
- description: 'Optional: Defaults to 2'
- format: int64
- type: integer
- sync-log:
- description: 'true for high reliability, prevent data loss
- when power failure. Optional: Defaults to true'
- type: boolean
- use-delete-range:
- type: boolean
type: object
- readpool:
- properties:
- coprocessor:
- properties:
- high_concurrency:
- description: 'Optional: Defaults to 8'
- format: int64
- type: integer
- low_concurrency:
- description: 'Optional: Defaults to 8'
- format: int64
- type: integer
- max_tasks_per_worker_high:
- description: 'Optional: Defaults to 2000'
- format: int64
- type: integer
- max_tasks_per_worker_low:
- description: 'Optional: Defaults to 2000'
- format: int64
- type: integer
- max_tasks_per_worker_normal:
- description: 'Optional: Defaults to 2000'
- format: int64
- type: integer
- normal_concurrency:
- description: 'Optional: Defaults to 8'
- format: int64
- type: integer
- stack_size:
- description: 'Optional: Defaults to 10MB'
- type: string
- type: object
- storage:
- properties:
- high_concurrency:
- description: 'Optional: Defaults to 4'
- format: int64
- type: integer
- low_concurrency:
- description: 'Optional: Defaults to 4'
- format: int64
- type: integer
- max_tasks_per_worker_high:
- description: 'Optional: Defaults to 2000'
- format: int64
- type: integer
- max_tasks_per_worker_low:
- description: 'Optional: Defaults to 2000'
- format: int64
- type: integer
- max_tasks_per_worker_normal:
- description: 'Optional: Defaults to 2000'
- format: int64
- type: integer
- normal_concurrency:
- description: 'Optional: Defaults to 4'
- format: int64
- type: integer
- stack_size:
- description: 'Optional: Defaults to 10MB'
- type: string
- type: object
- type: object
- rocksdb:
- description: TiKVDbConfig is the rocksdb config.
+ type: object
+ priorityClassName:
+ description: 'PriorityClassName of the component. Override the cluster-level
+ one if present Optional: Defaults to cluster-level setting'
+ type: string
+ privileged:
+ description: 'Whether create the TiFlash container in privileged
+ mode, it is highly discouraged to enable this in critical environment.
+ Optional: defaults to false'
+ type: boolean
+ replicas:
+ description: The desired ready replicas
+ format: int32
+ type: integer
+ requests:
+ description: 'Requests describes the minimum amount of compute resources
+ required. If Requests is omitted for a container, it defaults
+ to Limits if that is explicitly specified, otherwise to an implementation-defined
+ value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ schedulerName:
+ description: 'SchedulerName of the component. Override the cluster-level
+ one if present Optional: Defaults to cluster-level setting'
+ type: string
+ serviceAccount:
+ description: Specify a Service Account for TiFlash
+ type: string
+ storageClaims:
+ description: The persistent volume claims of the TiFlash data storages.
+ TiFlash supports multiple disks.
+ items:
+ description: StorageClaim contains details of TiFlash storages
+ properties:
+ resources:
+ description: ResourceRequirements describes the compute resource
+ requirements.
+ properties:
+ limits:
+ description: 'Limits describes the maximum amount of compute
+ resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ description: 'Requests describes the minimum amount of
+ compute resources required. If Requests is omitted for
+ a container, it defaults to Limits if that is explicitly
+ specified, otherwise to an implementation-defined value.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ storageClassName:
+ description: 'Name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+ type: string
+ type: object
+ type: array
+ tolerations:
+ description: 'Tolerations of the component. Override the cluster-level
+ tolerations if non-empty Optional: Defaults to cluster-level setting'
+ items:
+ description: The pod this Toleration is attached to tolerates
              any taint that matches the triple <key,value,effect> using the
              matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ version:
+ description: 'Version of the component. Override the cluster-level
+ version if non-empty Optional: Defaults to cluster-level setting'
+ type: string
+ required:
+ - replicas
+ - storageClaims
+ type: object
+ tikv:
+ description: TiKVSpec contains details of TiKV members
+ properties:
+ affinity:
+ description: Affinity is a group of affinity scheduling rules.
+ properties:
+ nodeAffinity:
+ description: Node affinity is a group of node affinity scheduling
+ rules.
properties:
- auto-tuned:
- type: boolean
- bytes-per-sync:
- type: string
- compaction-readahead-size:
- description: 'Optional: Defaults to 0'
- type: string
- create-if-missing:
- description: 'Optional: Defaults to true'
- type: boolean
- defaultcf:
- description: TiKVCfConfig is the config of a cf
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the affinity expressions specified
+ by this field, but it may choose a node that violates
+ one or more of the expressions. The node that is most
+ preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements
+ (resource request, requiredDuringScheduling affinity expressions,
+ etc.), compute a sum by iterating through the elements
+ of this field and adding "weight" to the sum if the node
+ matches the corresponding matchExpressions; the node(s)
+ with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches
+ all objects with implicit weight 0 (i.e. it's a no-op).
+ A null preferred scheduling term matches no objects
+ (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed.
+ The TopologySelectorTerm type implements a subset
+ of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists, DoesNotExist. Gt, and
+ Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator
+ is Exists or DoesNotExist, the values
+ array must be empty. If the operator is
+ Gt or Lt, the values array must have a
+ single element, which will be interpreted
+ as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists, DoesNotExist. Gt, and
+ Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator
+ is Exists or DoesNotExist, the values
+ array must be empty. If the operator is
+ Gt or Lt, the values array must have a
+ single element, which will be interpreted
+ as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - preference
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: A node selector represents the union of the
+ results of one or more label queries over a set of nodes;
+ that is, it represents the OR of the selectors represented
+ by the node selector terms.
properties:
- block-based-bloom-filter:
- type: boolean
- block-cache-size:
- type: string
- block-size:
- type: string
- bloom-filter-bits-per-key:
- format: int64
- type: integer
- cache-index-and-filter-blocks:
- type: boolean
- compaction-pri:
- format: int64
- type: integer
- compaction-style:
- format: int64
- type: integer
- compression-per-level:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
items:
- type: string
- type: array
- disable-auto-compactions:
- type: boolean
- disable-block-cache:
- type: boolean
- dynamic-level-bytes:
- type: boolean
- enable-doubly-skiplist:
- type: boolean
- force-consistency-checks:
- type: boolean
- hard-pending-compaction-bytes-limit:
- type: string
- level0-file-num-compaction-trigger:
- format: int64
- type: integer
- level0-slowdown-writes-trigger:
- format: int64
- type: integer
- level0-stop-writes-trigger:
- format: int64
- type: integer
- max-bytes-for-level-base:
- type: string
- max-bytes-for-level-multiplier:
- format: int64
- type: integer
- max-compaction-bytes:
- type: string
- max-write-buffer-number:
- format: int64
- type: integer
- min-write-buffer-number-to-merge:
- format: int64
- type: integer
- num-levels:
- format: int64
- type: integer
- optimize-filters-for-hits:
- type: boolean
- pin-l0-filter-and-index-blocks:
- type: boolean
- prop-keys-index-distance:
- format: int64
- type: integer
- prop-size-index-distance:
- format: int64
- type: integer
- read-amp-bytes-per-bit:
- format: int64
- type: integer
- soft-pending-compaction-bytes-limit:
- type: string
- target-file-size-base:
- type: string
- titan:
- description: TiKVTitanCfConfig is the titian config.
- properties:
- blob-cache-size:
- type: string
- blob-file-compression:
- type: string
- blob-run-mode:
- type: string
- discardable-ratio:
- format: double
- type: number
- max-gc-batch-size:
- type: string
- merge-small-file-threshold:
- type: string
- min-blob-size:
- type: string
- min-gc-batch-size:
- type: string
- sample-ratio:
- format: double
- type: number
- type: object
- use-bloom-filter:
- type: boolean
- whole-key-filtering:
- type: boolean
- write-buffer-size:
- type: string
- type: object
- enable-pipelined-write:
- type: boolean
- enable-statistics:
- description: 'Optional: Defaults to true'
- type: boolean
- info-log-dir:
- type: string
- info-log-keep-log-file-num:
- format: int64
- type: integer
- info-log-max-size:
- type: string
- info-log-roll-time:
- type: string
- lockcf:
- description: TiKVCfConfig is the config of a cf
- properties:
- block-based-bloom-filter:
- type: boolean
- block-cache-size:
- type: string
- block-size:
- type: string
- bloom-filter-bits-per-key:
- format: int64
- type: integer
- cache-index-and-filter-blocks:
- type: boolean
- compaction-pri:
- format: int64
- type: integer
- compaction-style:
- format: int64
- type: integer
- compression-per-level:
- items:
- type: string
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed.
+ The TopologySelectorTerm type implements a subset
+ of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are
+                                In, NotIn, Exists, DoesNotExist, Gt, and
+ Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator
+ is Exists or DoesNotExist, the values
+ array must be empty. If the operator is
+ Gt or Lt, the values array must have a
+ single element, which will be interpreted
+ as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are
+                              In, NotIn, Exists, DoesNotExist, Gt, and
+ Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator
+ is Exists or DoesNotExist, the values
+ array must be empty. If the operator is
+ Gt or Lt, the values array must have a
+ single element, which will be interpreted
+ as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
type: array
- disable-auto-compactions:
- type: boolean
- disable-block-cache:
- type: boolean
- dynamic-level-bytes:
- type: boolean
- enable-doubly-skiplist:
- type: boolean
- force-consistency-checks:
- type: boolean
- hard-pending-compaction-bytes-limit:
- type: string
- level0-file-num-compaction-trigger:
- format: int64
- type: integer
- level0-slowdown-writes-trigger:
- format: int64
- type: integer
- level0-stop-writes-trigger:
- format: int64
- type: integer
- max-bytes-for-level-base:
- type: string
- max-bytes-for-level-multiplier:
- format: int64
- type: integer
- max-compaction-bytes:
- type: string
- max-write-buffer-number:
- format: int64
- type: integer
- min-write-buffer-number-to-merge:
- format: int64
- type: integer
- num-levels:
- format: int64
- type: integer
- optimize-filters-for-hits:
- type: boolean
- pin-l0-filter-and-index-blocks:
- type: boolean
- prop-keys-index-distance:
- format: int64
- type: integer
- prop-size-index-distance:
- format: int64
- type: integer
- read-amp-bytes-per-bit:
- format: int64
- type: integer
- soft-pending-compaction-bytes-limit:
- type: string
- target-file-size-base:
- type: string
- titan:
- description: TiKVTitanCfConfig is the titian config.
- properties:
- blob-cache-size:
- type: string
- blob-file-compression:
- type: string
- blob-run-mode:
- type: string
- discardable-ratio:
- format: double
- type: number
- max-gc-batch-size:
- type: string
- merge-small-file-threshold:
- type: string
- min-blob-size:
- type: string
- min-gc-batch-size:
- type: string
- sample-ratio:
- format: double
- type: number
- type: object
- use-bloom-filter:
- type: boolean
- whole-key-filtering:
- type: boolean
- write-buffer-size:
- type: string
+ required:
+ - nodeSelectorTerms
type: object
- max-background-jobs:
- description: 'Optional: Defaults to 8'
- format: int64
- type: integer
- max-manifest-file-size:
- description: 'Optional: Defaults to 128MB'
- type: string
- max-open-files:
- description: 'Optional: Defaults to 40960'
- format: int64
- type: integer
- max-sub-compactions:
- description: 'Optional: Defaults to 3'
- format: int64
- type: integer
- max-total-wal-size:
- description: 'Optional: Defaults to 4GB'
- type: string
- raftcf:
- description: TiKVCfConfig is the config of a cf
- properties:
- block-based-bloom-filter:
- type: boolean
- block-cache-size:
- type: string
- block-size:
- type: string
- bloom-filter-bits-per-key:
- format: int64
- type: integer
- cache-index-and-filter-blocks:
- type: boolean
- compaction-pri:
- format: int64
- type: integer
- compaction-style:
- format: int64
- type: integer
- compression-per-level:
- items:
+ type: object
+ podAffinity:
+ description: Pod affinity is a group of inter pod affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the affinity expressions specified
+ by this field, but it may choose a node that violates
+ one or more of the expressions. The node that is most
+ preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements
+ (resource request, requiredDuringScheduling affinity expressions,
+ etc.), compute a sum by iterating through the elements
+ of this field and adding "weight" to the sum if the node
+              has pods which match the corresponding podAffinityTerm;
+ the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or
+ not co-located (anti-affinity) with, where co-located
+ is defined as running on a node whose value of the
+                        label with key topologyKey matches that of any
+ node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query
+ over a set of resources. The result of matchLabels
+ and matchExpressions are ANDed. An empty label
+ selector matches all objects. A null label selector
+ matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of
+ label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a
+ key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified
+ namespaces, where co-located is defined as running
+ on a node whose value of the label with key
+ topologyKey matches that of any node on which
+ any of the selected pods is running. Empty topologyKey
+ is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this
+ field are not met at scheduling time, the pod will not
+ be scheduled onto the node. If the affinity requirements
+ specified by this field cease to be met at some point
+ during pod execution (e.g. due to a pod label update),
+ the system may or may not try to eventually evict the
+ pod from its node. When there are multiple elements, the
+ lists of nodes corresponding to each podAffinityTerm are
+ intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+                      with key topologyKey matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
type: string
- type: array
- disable-auto-compactions:
- type: boolean
- disable-block-cache:
- type: boolean
- dynamic-level-bytes:
- type: boolean
- enable-doubly-skiplist:
- type: boolean
- force-consistency-checks:
- type: boolean
- hard-pending-compaction-bytes-limit:
- type: string
- level0-file-num-compaction-trigger:
- format: int64
- type: integer
- level0-slowdown-writes-trigger:
- format: int64
- type: integer
- level0-stop-writes-trigger:
- format: int64
- type: integer
- max-bytes-for-level-base:
- type: string
- max-bytes-for-level-multiplier:
- format: int64
- type: integer
- max-compaction-bytes:
- type: string
- max-write-buffer-number:
- format: int64
- type: integer
- min-write-buffer-number-to-merge:
- format: int64
- type: integer
- num-levels:
- format: int64
- type: integer
- optimize-filters-for-hits:
- type: boolean
- pin-l0-filter-and-index-blocks:
- type: boolean
- prop-keys-index-distance:
- format: int64
- type: integer
- prop-size-index-distance:
- format: int64
- type: integer
- read-amp-bytes-per-bit:
- format: int64
- type: integer
- soft-pending-compaction-bytes-limit:
- type: string
- target-file-size-base:
- type: string
- titan:
- description: TiKVTitanCfConfig is the titian config.
- properties:
- blob-cache-size:
- type: string
- blob-file-compression:
- type: string
- blob-run-mode:
- type: string
- discardable-ratio:
- format: double
- type: number
- max-gc-batch-size:
- type: string
- merge-small-file-threshold:
- type: string
- min-blob-size:
- type: string
- min-gc-batch-size:
- type: string
- sample-ratio:
- format: double
- type: number
- type: object
- use-bloom-filter:
- type: boolean
- whole-key-filtering:
- type: boolean
- write-buffer-size:
- type: string
- type: object
- rate-bytes-per-sec:
- type: string
- rate-limiter-mode:
- format: int64
- type: integer
- stats-dump-period:
- description: 'Optional: Defaults to 10m'
- type: string
- titan:
- description: TiKVTitanDBConfig is the config a titian db.
- properties:
- dirname:
- type: string
- disable-gc:
- type: boolean
- enabled:
- type: boolean
- max-background-gc:
- format: int64
- type: integer
- purge-obsolete-files-period:
- description: The value of this field will be truncated
- to seconds.
- type: string
- type: object
- use-direct-io-for-flush-and-compaction:
- type: boolean
- wal-bytes-per-sync:
- type: string
- wal-recovery-mode:
- description: 'Optional: Defaults to 2'
- format: int64
- type: integer
- wal-size-limit:
- type: string
- wal-ttl-seconds:
- format: int64
- type: integer
- writable-file-max-buffer-size:
- type: string
- writecf:
- description: TiKVCfConfig is the config of a cf
- properties:
- block-based-bloom-filter:
- type: boolean
- block-cache-size:
- type: string
- block-size:
- type: string
- bloom-filter-bits-per-key:
- format: int64
- type: integer
- cache-index-and-filter-blocks:
- type: boolean
- compaction-pri:
- format: int64
- type: integer
- compaction-style:
- format: int64
- type: integer
- compression-per-level:
- items:
- type: string
- type: array
- disable-auto-compactions:
- type: boolean
- disable-block-cache:
- type: boolean
- dynamic-level-bytes:
- type: boolean
- enable-doubly-skiplist:
- type: boolean
- force-consistency-checks:
- type: boolean
- hard-pending-compaction-bytes-limit:
- type: string
- level0-file-num-compaction-trigger:
- format: int64
- type: integer
- level0-slowdown-writes-trigger:
- format: int64
- type: integer
- level0-stop-writes-trigger:
- format: int64
- type: integer
- max-bytes-for-level-base:
- type: string
- max-bytes-for-level-multiplier:
- format: int64
- type: integer
- max-compaction-bytes:
- type: string
- max-write-buffer-number:
- format: int64
- type: integer
- min-write-buffer-number-to-merge:
- format: int64
- type: integer
- num-levels:
- format: int64
- type: integer
- optimize-filters-for-hits:
- type: boolean
- pin-l0-filter-and-index-blocks:
- type: boolean
- prop-keys-index-distance:
- format: int64
- type: integer
- prop-size-index-distance:
- format: int64
- type: integer
- read-amp-bytes-per-bit:
- format: int64
- type: integer
- soft-pending-compaction-bytes-limit:
- type: string
- target-file-size-base:
- type: string
- titan:
- description: TiKVTitanCfConfig is the titian config.
- properties:
- blob-cache-size:
- type: string
- blob-file-compression:
- type: string
- blob-run-mode:
- type: string
- discardable-ratio:
- format: double
- type: number
- max-gc-batch-size:
- type: string
- merge-small-file-threshold:
- type: string
- min-blob-size:
- type: string
- min-gc-batch-size:
- type: string
- sample-ratio:
- format: double
- type: number
- type: object
- use-bloom-filter:
- type: boolean
- whole-key-filtering:
- type: boolean
- write-buffer-size:
- type: string
- type: object
- type: object
- security:
- properties:
- ca_path:
- type: string
- cert_path:
- type: string
- cipher_file:
- type: string
- key_path:
- type: string
- override_ssl_target:
- type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
type: object
- server:
- description: TiKVServerConfig is the configuration of TiKV server.
+ podAntiAffinity:
+ description: Pod anti affinity is a group of inter pod anti
+ affinity scheduling rules.
properties:
- concurrent-recv-snap-limit:
- description: 'Optional: Defaults to 32'
- format: int32
- type: integer
- concurrent-send-snap-limit:
- description: 'Optional: Defaults to 32'
- format: int32
- type: integer
- end-point-batch-row-limit:
- format: int32
- type: integer
- end-point-enable-batch-if-possible:
- format: int32
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the anti-affinity expressions specified
+ by this field, but it may choose a node that violates
+ one or more of the expressions. The node that is most
+ preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements
+ (resource request, requiredDuringScheduling anti-affinity
+ expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the
+                  sum if the node has pods which match the corresponding
+ podAffinityTerm; the node(s) with the highest sum are
+ the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or
+ not co-located (anti-affinity) with, where co-located
+ is defined as running on a node whose value of the
+                        label with key topologyKey matches that of any
+ node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query
+ over a set of resources. The result of matchLabels
+ and matchExpressions are ANDed. An empty label
+ selector matches all objects. A null label selector
+ matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of
+ label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a
+ key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified
+ namespaces, where co-located is defined as running
+ on a node whose value of the label with key
+ topologyKey matches that of any node on which
+ any of the selected pods is running. Empty topologyKey
+ is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified
+ by this field are not met at scheduling time, the pod
+ will not be scheduled onto the node. If the anti-affinity
+ requirements specified by this field cease to be met at
+ some point during pod execution (e.g. due to a pod label
+ update), the system may or may not try to eventually evict
+ the pod from its node. When there are multiple elements,
+ the lists of nodes corresponding to each podAffinityTerm
+ are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+                      with key topologyKey matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ annotations:
+ description: 'Annotations of the component. Merged into the cluster-level
+ annotations if non-empty Optional: Defaults to cluster-level setting'
+ type: object
+ baseImage:
+          description: Base image of the component, image tag is not allowed
+ during validation
+ type: string
+ config:
+ description: TiKVConfig is the configuration of TiKV.
+ properties:
+ coprocessor:
+ description: TiKVCoprocessorConfig is the configuration of TiKV
+ Coprocessor component.
+ properties:
+ batch-split-limit:
+ description: One split check produces several split keys
+ in batch. This config limits the number of produced split
+ keys in one batch. optional
+ format: int64
type: integer
- end-point-recursion-limit:
- description: 'Optional: Defaults to 1000'
- format: int32
+ region-max-keys:
+ description: 'When the number of keys in Region [a,e) exceeds
+ the `region-max-keys`, it will be split into several Regions
+ [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b),
+ [b,c), [c,d) will be `region-split-keys`. See also: region-split-keys
+ Optional: Defaults to 1440000 optional'
+ format: int64
type: integer
- end-point-request-max-handle-duration:
+ region-max-size:
+ description: 'When Region [a,e) size exceeds `region-max-size`,
+ it will be split into several Regions [a,b), [b,c), [c,d),
+ [d,e) and the size of [a,b), [b,c), [c,d) will be `region-split-size`
+ (or a little larger). See also: region-split-size Optional:
+ Defaults to 144MB optional'
type: string
- end-point-stream-batch-row-limit:
- format: int32
- type: integer
- end-point-stream-channel-size:
- format: int32
+ region-split-keys:
+ description: 'When the number of keys in Region [a,e) exceeds
+ the `region-max-keys`, it will be split into several Regions
+ [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b),
+ [b,c), [c,d) will be `region-split-keys`. See also: region-max-keys
+ Optional: Defaults to 960000 optional'
+ format: int64
type: integer
- grpc-compression-type:
- description: 'Optional: Defaults to none'
+ region-split-size:
+ description: 'When Region [a,e) size exceeds `region-max-size`,
+ it will be split into several Regions [a,b), [b,c), [c,d),
+ [d,e) and the size of [a,b), [b,c), [c,d) will be `region-split-size`
+ (or a little larger). See also: region-max-size Optional:
+ Defaults to 96MB optional'
type: string
- grpc-concurrency:
- description: 'Optional: Defaults to 4'
- format: int32
- type: integer
- grpc-concurrent-stream:
- description: 'Optional: Defaults to 1024'
- format: int32
- type: integer
- grpc-keepalive-time:
- description: 'Optional: Defaults to 10s'
- type: string
- grpc-keepalive-timeout:
- description: 'Optional: Defaults to 3s'
- type: string
- grpc-raft-conn-num:
- description: 'Optional: Defaults to 10'
- format: int32
- type: integer
- grpc-stream-initial-window-size:
- description: 'Optional: Defaults to 2MB'
- type: string
- grpc_memory_pool_quota:
- description: 'Optional: Defaults to 32G'
- type: string
- heavy-load-threshold:
- format: int32
- type: integer
- heavy-load-wait-duration:
- description: 'Optional: Defaults to 60s'
+ split-region-on-table:
+ description: 'When it is set to `true`, TiKV will try to
+ split a Region with table prefix if that Region crosses
+ tables. It is recommended to turn off this option if there
+ will be a large number of tables created. Optional: Defaults
+ to false optional'
+ type: boolean
+ type: object
+ encryption:
+ properties:
+ data-key-rotation-period:
+                  description: 'The frequency of data key rotation, it is managed
+                    by TiKV. Optional: default to 7d'
type: string
- labels:
+ master-key:
+ properties:
+ access-key:
+ description: AccessKey of AWS user, leave empty if using
+                        other authorization method
+ type: string
+ endpoint:
+                      description: Used for KMS-compatible services such as Ceph
+                        or MinIO. If using AWS, leave empty
+ type: string
+ key-id:
+                      description: AWS CMK key-id, it can be found in the AWS
+                        Console or via the aws cli. This field is required
+ type: string
+ method:
+                      description: 'Encryption method, the master key is used
+                        to encrypt the data key. Possible values: plaintext, aes128-ctr,
+                        aes192-ctr, aes256-ctr. Optional: Default to plaintext'
+ type: string
+ path:
+                      description: Text file containing the key in hex form,
+                        ending with a newline character ('\n').
+ type: string
+ region:
+                      description: 'Region of this KMS key. Optional: Default
+                        to us-east-1'
+ type: string
+ secret-access-key:
+ description: SecretKey of AWS user, leave empty if using
+                        other authorization method
+ type: string
+ type:
+                      description: 'Use KMS encryption or file encryption. Possible
+                        values: kms, file. If set to kms, MasterKeyKMSConfig should
+                        be filled; if set to file, MasterKeyFileConfig should be
+                        filled'
+ type: string
+ required:
+ - path
+ - key-id
type: object
- snap-max-total-size:
- type: string
- snap-max-write-bytes-per-sec:
- description: 'Optional: Defaults to 100MB'
- type: string
- stats-concurrency:
- format: int32
- type: integer
- status-thread-pool-size:
- description: 'Optional: Defaults to 1'
+ method:
+                  description: 'Encryption method, the data key is used to encrypt
+                    raw RocksDB data. Possible values: plaintext, aes128-ctr,
+                    aes192-ctr, aes256-ctr. Optional: Default to plaintext'
type: string
- type: object
- storage:
- description: TiKVStorageConfig is the config of storage
- properties:
- block-cache:
- description: TiKVBlockCacheConfig is the config of a block
- cache
+ previous-master-key:
properties:
- capacity:
+ access-key:
+ description: AccessKey of AWS user, leave empty if using
+                          other authorization method
type: string
- high-pri-pool-ratio:
- format: double
- type: number
- memory-allocator:
+ endpoint:
+                        description: Used for KMS-compatible services such as Ceph
+                          or MinIO. If using AWS, leave empty
type: string
- num-shard-bits:
- format: int64
- type: integer
- shared:
- description: 'Optional: Defaults to true'
- type: boolean
- strict-capacity-limit:
- type: boolean
+ key-id:
+                        description: AWS CMK key-id, it can be found in the AWS
+                          Console or via the aws cli. This field is required
+ type: string
+ method:
+                        description: 'Encryption method, the master key is used
+                          to encrypt the data key. Possible values: plaintext, aes128-ctr,
+                          aes192-ctr, aes256-ctr. Optional: Default to plaintext'
+ type: string
+ path:
+                        description: Text file containing the key in hex form,
+                          ending with a newline character ('\n').
+ type: string
+ region:
+                        description: 'Region of this KMS key. Optional: Default
+                          to us-east-1'
+ type: string
+ secret-access-key:
+ description: SecretKey of AWS user, leave empty if using
+                          other authorization method
+ type: string
+ type:
+                        description: 'Use KMS encryption or file encryption. Possible
+                          values: kms, file. If set to kms, MasterKeyKMSConfig should
+                          be filled; if set to file, MasterKeyFileConfig should be
+                          filled'
+ type: string
+ required:
+ - path
+ - key-id
type: object
- max-key-size:
+ type: object
+ gc:
+ properties:
+ "\tbatch-keys":
+ description: 'Optional: Defaults to 512'
format: int64
type: integer
- scheduler-concurrency:
- description: 'Optional: Defaults to 2048000'
+ "\tmax-write-bytes-per-sec":
+ type: string
+ type: object
+ import:
+ properties:
+ import-dir:
+ type: string
+ max-open-engines:
format: int64
type: integer
- scheduler-notify-capacity:
+ max-prepare-duration:
+ type: string
+ num-import-jobs:
format: int64
type: integer
- scheduler-pending-write-threshold:
- description: 'Optional: Defaults to 100MB'
+ num-import-sst-jobs:
+ format: int64
+ type: integer
+ num-threads:
+ format: int64
+ type: integer
+ region-split-size:
type: string
- scheduler-worker-pool-size:
- description: 'Optional: Defaults to 4'
+ stream-channel-window:
format: int64
type: integer
+ upload-speed-limit:
+ type: string
type: object
- type: object
- configUpdateStrategy:
- description: 'ConfigUpdateStrategy of the component. Override the
- cluster-level updateStrategy if present Optional: Defaults to
- cluster-level setting'
- type: string
- hostNetwork:
- description: 'Whether Hostnetwork of the component is enabled. Override
- the cluster-level setting if present Optional: Defaults to cluster-level
- setting'
- type: boolean
- imagePullPolicy:
- description: 'ImagePullPolicy of the component. Override the cluster-level
- imagePullPolicy if present Optional: Defaults to cluster-level
- setting'
- type: string
- limits:
- description: 'Limits describes the maximum amount of compute resources
- allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- maxFailoverCount:
- description: 'MaxFailoverCount limit the max replicas could be added
- in failover, 0 means unlimited Optional: Defaults to 0'
- format: int32
- type: integer
- nodeSelector:
- description: 'NodeSelector of the component. Merged into the cluster-level
- nodeSelector if non-empty Optional: Defaults to cluster-level
- setting'
- type: object
- podSecurityContext:
- description: PodSecurityContext holds pod-level security attributes
- and common container settings. Some fields are also present in
- container.securityContext. Field values of container.securityContext
- take precedence over field values of PodSecurityContext.
- properties:
- fsGroup:
- description: |-
- A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
-
- 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
-
- If unset, the Kubelet will not modify the ownership and permissions of any volume.
- format: int64
- type: integer
- runAsGroup:
- description: The GID to run the entrypoint of the container
- process. Uses runtime default if unset. May also be set in
- SecurityContext. If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence for
- that container.
- format: int64
- type: integer
- runAsNonRoot:
- description: Indicates that the container must run as a non-root
- user. If true, the Kubelet will validate the image at runtime
- to ensure that it does not run as UID 0 (root) and fail to
- start the container if it does. If unset or false, no such
- validation will be performed. May also be set in SecurityContext. If
- set in both SecurityContext and PodSecurityContext, the value
- specified in SecurityContext takes precedence.
+ log-file:
+ type: string
+ log-level:
+ description: 'Optional: Defaults to info'
+ type: string
+ log-rotation-timespan:
+ description: 'Optional: Defaults to 24h'
+ type: string
+ panic-when-unexpected-key-or-data:
type: boolean
- runAsUser:
- description: The UID to run the entrypoint of the container
- process. Defaults to user specified in image metadata if unspecified.
- May also be set in SecurityContext. If set in both SecurityContext
- and PodSecurityContext, the value specified in SecurityContext
- takes precedence for that container.
- format: int64
- type: integer
- seLinuxOptions:
- description: SELinuxOptions are the labels to be applied to
- the container
+ pd:
properties:
- level:
- description: Level is SELinux level label that applies to
- the container.
- type: string
- role:
- description: Role is a SELinux role label that applies to
- the container.
+ endpoints:
+ description: |-
+ The PD endpoints for the client.
+
+ Default is empty.
+ items:
+ type: string
+ type: array
+ retry-interval:
+ description: |-
+ The interval at which to retry a PD connection initialization.
+
+ Default is 300ms. Optional: Defaults to 300ms
type: string
- type:
- description: Type is a SELinux type label that applies to
- the container.
+ retry-log-every:
+ description: |-
+ If the client observes the same error message on retry, it can repeat the message only every `n` times.
+
+ Default is 10. Set to 1 to disable this feature. Optional: Defaults to 10
+ format: int64
+ type: integer
+ retry-max-count:
+ description: |-
+ The maximum number of times to retry a PD connection initialization.
+
+ Default is isize::MAX, represented by -1. Optional: Defaults to -1
+ format: int64
+ type: integer
+ type: object
+ raftdb:
+ properties:
+ allow-concurrent-memtable-write:
+ type: boolean
+ bytes-per-sync:
type: string
- user:
- description: User is a SELinux user label that applies to
- the container.
+ compaction-readahead-size:
type: string
- type: object
- supplementalGroups:
- description: A list of groups applied to the first process run
- in each container, in addition to the container's primary
- GID. If unspecified, no groups will be added to any container.
- items:
- format: int64
- type: integer
- type: array
- sysctls:
- description: Sysctls hold a list of namespaced sysctls used
- for the pod. Pods with unsupported sysctls (by the container
- runtime) might fail to launch.
- items:
- description: Sysctl defines a kernel parameter to be set
- properties:
- name:
- description: Name of a property to set
- type: string
- value:
- description: Value of a property to set
- type: string
- required:
- - name
- - value
- type: object
- type: array
- windowsOptions:
- description: WindowsSecurityContextOptions contain Windows-specific
- options and credentials.
- properties:
- gmsaCredentialSpec:
- description: GMSACredentialSpec is where the GMSA admission
- webhook (https://github.com/kubernetes-sigs/windows-gmsa)
- inlines the contents of the GMSA credential spec named
- by the GMSACredentialSpecName field. This field is alpha-level
- and is only honored by servers that enable the WindowsGMSA
- feature flag.
+ create-if-missing:
+ type: boolean
+ defaultcf:
+ description: TiKVCfConfig is the config of a cf
+ properties:
+ block-based-bloom-filter:
+ type: boolean
+ block-cache-size:
+ type: string
+ block-size:
+ type: string
+ bloom-filter-bits-per-key:
+ format: int64
+ type: integer
+ cache-index-and-filter-blocks:
+ type: boolean
+ compaction-pri:
+ format: int64
+ type: integer
+ compaction-style:
+ format: int64
+ type: integer
+ compression-per-level:
+ items:
+ type: string
+ type: array
+ disable-auto-compactions:
+ type: boolean
+ disable-block-cache:
+ type: boolean
+ dynamic-level-bytes:
+ type: boolean
+ enable-doubly-skiplist:
+ type: boolean
+ force-consistency-checks:
+ type: boolean
+ hard-pending-compaction-bytes-limit:
+ type: string
+ level0-file-num-compaction-trigger:
+ format: int64
+ type: integer
+ level0-slowdown-writes-trigger:
+ format: int64
+ type: integer
+ level0-stop-writes-trigger:
+ format: int64
+ type: integer
+ max-bytes-for-level-base:
+ type: string
+ max-bytes-for-level-multiplier:
+ format: int64
+ type: integer
+ max-compaction-bytes:
+ type: string
+ max-write-buffer-number:
+ format: int64
+ type: integer
+ min-write-buffer-number-to-merge:
+ format: int64
+ type: integer
+ num-levels:
+ format: int64
+ type: integer
+ optimize-filters-for-hits:
+ type: boolean
+ pin-l0-filter-and-index-blocks:
+ type: boolean
+ prop-keys-index-distance:
+ format: int64
+ type: integer
+ prop-size-index-distance:
+ format: int64
+ type: integer
+ read-amp-bytes-per-bit:
+ format: int64
+ type: integer
+ soft-pending-compaction-bytes-limit:
+ type: string
+ target-file-size-base:
+ type: string
+ titan:
+                      description: TiKVTitanCfConfig is the titan config.
+ properties:
+ blob-cache-size:
+ type: string
+ blob-file-compression:
+ type: string
+ blob-run-mode:
+ type: string
+ discardable-ratio:
+ format: double
+ type: number
+ max-gc-batch-size:
+ type: string
+ merge-small-file-threshold:
+ type: string
+ min-blob-size:
+ type: string
+ min-gc-batch-size:
+ type: string
+ sample-ratio:
+ format: double
+ type: number
+ type: object
+ use-bloom-filter:
+ type: boolean
+ whole-key-filtering:
+ type: boolean
+ write-buffer-size:
+ type: string
+ type: object
+ enable-pipelined-write:
+ type: boolean
+ enable-statistics:
+ type: boolean
+ info-log-dir:
type: string
- gmsaCredentialSpecName:
- description: GMSACredentialSpecName is the name of the GMSA
- credential spec to use. This field is alpha-level and
- is only honored by servers that enable the WindowsGMSA
- feature flag.
+ info-log-keep-log-file-num:
+ format: int64
+ type: integer
+ info-log-max-size:
type: string
- runAsUserName:
- description: The UserName in Windows to run the entrypoint
- of the container process. Defaults to the user specified
- in image metadata if unspecified. May also be set in PodSecurityContext.
- If set in both SecurityContext and PodSecurityContext,
- the value specified in SecurityContext takes precedence.
- This field is alpha-level and it is only honored by servers
- that enable the WindowsRunAsUserName feature flag.
+ info-log-roll-time:
+ type: string
+ max-background-jobs:
+ format: int64
+ type: integer
+ max-manifest-file-size:
+ type: string
+ max-open-files:
+ format: int64
+ type: integer
+ max-sub-compactions:
+ format: int64
+ type: integer
+ max-total-wal-size:
+ type: string
+ stats-dump-period:
+ type: string
+ use-direct-io-for-flush-and-compaction:
+ type: boolean
+ wal-bytes-per-sync:
+ type: string
+ wal-dir:
+ type: string
+ wal-recovery-mode:
+ type: string
+ wal-size-limit:
+ type: string
+ wal-ttl-seconds:
+ format: int64
+ type: integer
+ writable-file-max-buffer-size:
type: string
type: object
- type: object
- priorityClassName:
- description: 'PriorityClassName of the component. Override the cluster-level
- one if present Optional: Defaults to cluster-level setting'
- type: string
- privileged:
- description: 'Whether create the TiKV container in privileged mode,
- it is highly discouraged to enable this in critical environment.
- Optional: defaults to false'
- type: boolean
- replicas:
- description: The desired ready replicas
- format: int32
- type: integer
- requests:
- description: 'Requests describes the minimum amount of compute resources
- required. If Requests is omitted for a container, it defaults
- to Limits if that is explicitly specified, otherwise to an implementation-defined
- value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
- type: object
- schedulerName:
- description: 'SchedulerName of the component. Override the cluster-level
- one if present Optional: Defaults to cluster-level setting'
- type: string
- storageClassName:
- description: The storageClassName of the persistent volume for TiKV
- data storage. Defaults to Kubernetes default storage class.
- type: string
- tolerations:
- description: 'Tolerations of the component. Override the cluster-level
- tolerations if non-empty Optional: Defaults to cluster-level setting'
- items:
- description: The pod this Toleration is attached to tolerates
- any taint that matches the triple using the
- matching operator .
- properties:
- effect:
- description: Effect indicates the taint effect to match. Empty
- means match all taint effects. When specified, allowed values
- are NoSchedule, PreferNoSchedule and NoExecute.
- type: string
- key:
- description: Key is the taint key that the toleration applies
- to. Empty means match all taint keys. If the key is empty,
- operator must be Exists; this combination means to match
- all values and all keys.
- type: string
- operator:
- description: Operator represents a key's relationship to the
- value. Valid operators are Exists and Equal. Defaults to
- Equal. Exists is equivalent to wildcard for value, so that
- a pod can tolerate all taints of a particular category.
- type: string
- tolerationSeconds:
- description: TolerationSeconds represents the period of time
- the toleration (which must be of effect NoExecute, otherwise
- this field is ignored) tolerates the taint. By default,
- it is not set, which means tolerate the taint forever (do
- not evict). Zero and negative values will be treated as
- 0 (evict immediately) by the system.
- format: int64
- type: integer
- value:
- description: Value is the taint value the toleration matches
- to. If the operator is Exists, the value should be empty,
- otherwise just a regular string.
- type: string
- type: object
- type: array
- version:
- description: 'Version of the component. Override the cluster-level
- version if non-empty Optional: Defaults to cluster-level setting'
- type: string
- required:
- - replicas
- type: object
+ raftstore:
+ description: TiKVRaftstoreConfig is the configuration of TiKV
+ raftstore component.
+ properties:
+ abnormal-leader-missing-duration:
+                  description: Similar to the max-leader-missing-duration,
+                    instead it will log warnings and try to alert monitoring
+ systems, if there is any.
+ type: string
+ allow-remove-leader:
+ type: boolean
+ apply-max-batch-size:
+ format: int64
+ type: integer
+ apply-pool-size:
+ description: 'Optional: Defaults to 2'
+ format: int64
+ type: integer
+ clean-stale-peer-delay:
+ description: 'delay time before deleting a stale peer Optional:
+ Defaults to 10m'
+ type: string
+ cleanup-import-sst-interval:
+ description: 'Optional: Defaults to 10m'
+ type: string
+ consistency-check-interval:
+                  description: 'Interval (ms) to check whether the region
+ data is consistent. Optional: Defaults to 0'
+ type: string
+ hibernate-regions:
+ type: boolean
+ leader-transfer-max-log-lag:
+ format: int64
+ type: integer
+ lock-cf-compact-bytes-threshold:
+ description: 'Optional: Defaults to 256MB'
+ type: string
+ lock-cf-compact-interval:
+ description: 'Optional: Defaults to 10m'
+ type: string
+ max-leader-missing-duration:
+                  description: If the leader of a peer is missing for longer
+                    than max-leader-missing-duration, the peer will ask
+                    PD to confirm whether it is valid in any region. If
+ the peer is stale and is not valid in any region, it will
+ destroy itself.
+ type: string
+ max-peer-down-duration:
+                  description: 'When a peer is not active for max-peer-down-duration,
+                    the peer is considered to be down and is reported to
+ PD. Optional: Defaults to 5m'
+ type: string
+ merge-check-tick-interval:
+                  description: Interval to re-propose merge.
+ type: string
+ merge-max-log-gap:
+                  description: Max log gap allowed to propose merge.
+ format: int64
+ type: integer
+ messages-per-tick:
+ format: int64
+ type: integer
+ notify-capacity:
+ format: int64
+ type: integer
+ pd-heartbeat-tick-interval:
+ description: 'Optional: Defaults to 60s'
+ type: string
+ pd-store-heartbeat-tick-interval:
+ description: 'Optional: Defaults to 10s'
+ type: string
+ peer-stale-state-check-interval:
+ type: string
+ prevote:
+ description: 'Optional: Defaults to true'
+ type: boolean
+ raft-base-tick-interval:
+ description: raft-base-tick-interval is a base tick interval
+ (ms).
+ type: string
+ raft-election-timeout-ticks:
+ format: int64
+ type: integer
+ raft-entry-cache-life-time:
+ description: When a peer is not responding for this time,
+ leader will not keep entry cache for it.
+ type: string
+ raft-entry-max-size:
+                  description: 'When an entry exceeds the max size, the proposal
+                    is rejected. Optional: Defaults to 8MB'
+ type: string
+ raft-heartbeat-ticks:
+ format: int64
+ type: integer
+ raft-log-gc-count-limit:
+                  description: 'When the entry count exceeds this value, GC will
+                    be forcibly triggered. Optional: Defaults to 72000'
+ format: int64
+ type: integer
+ raft-log-gc-size-limit:
+ description: 'When the approximate size of raft log entries
+                    exceeds this value, GC will be forcibly triggered. Optional:
+ Defaults to 72MB'
+ type: string
+ raft-log-gc-threshold:
+                  description: 'A threshold to GC stale raft logs, must be >=
+ 1. Optional: Defaults to 50'
+ format: int64
+ type: integer
+ raft-log-gc-tick-interval:
+ description: 'Interval to gc unnecessary raft log (ms).
+ Optional: Defaults to 10s'
+ type: string
+ raft-reject-transfer-leader-duration:
+ description: When a peer is newly added, reject transferring
+ leader to the peer for a while.
+ type: string
+ raft-store-max-leader-lease:
+ description: The lease provided by a successfully proposed
+ and applied entry.
+ type: string
+ region-compact-check-interval:
+                  description: 'Interval (ms) to check whether to start compaction
+ for a region. Optional: Defaults to 5m'
+ type: string
+ region-compact-check-step:
+                  description: 'Number of regions checked each time.
+ Optional: Defaults to 100'
+ format: int64
+ type: integer
+ region-compact-min-tombstones:
+                  description: 'Minimum number of tombstones to trigger
+ manual compaction. Optional: Defaults to 10000'
+ format: int64
+ type: integer
+ region-compact-tombstones-percent:
+                  description: 'Minimum percentage of tombstones to trigger
+                    manual compaction. Should be between 1 and 100. Optional:
+ Defaults to 30'
+ format: int64
+ type: integer
+ region-split-check-diff:
+                  description: 'When the size change of a region exceeds the diff
+                    since the last check, it will be checked again whether it
+ should be split. Optional: Defaults to 6MB'
+ type: string
+ report-region-flow-interval:
+ type: string
+ right-derive-when-split:
+                  description: The right region derives the origin region ID when split.
+ type: boolean
+ snap-apply-batch-size:
+ type: string
+ snap-gc-timeout:
+ type: string
+ snap-mgr-gc-tick-interval:
+ type: string
+ split-region-check-tick-interval:
+                  description: 'Interval (ms) to check whether a region needs
+                    to be split or not. Optional: Defaults to 10s'
+ type: string
+ store-max-batch-size:
+ format: int64
+ type: integer
+ store-pool-size:
+ description: 'Optional: Defaults to 2'
+ format: int64
+ type: integer
+ sync-log:
+                  description: 'true for high reliability, preventing data loss
+                    on power failure. Optional: Defaults to true'
+ type: boolean
+ use-delete-range:
+ type: boolean
+ type: object
+ readpool:
+ properties:
+ coprocessor:
+ properties:
+ high-concurrency:
+ description: 'Optional: Defaults to 8'
+ format: int64
+ type: integer
+ low-concurrency:
+ description: 'Optional: Defaults to 8'
+ format: int64
+ type: integer
+ max-tasks-per-worker-high:
+ description: 'Optional: Defaults to 2000'
+ format: int64
+ type: integer
+ max-tasks-per-worker-low:
+ description: 'Optional: Defaults to 2000'
+ format: int64
+ type: integer
+ max-tasks-per-worker-normal:
+ description: 'Optional: Defaults to 2000'
+ format: int64
+ type: integer
+ normal-concurrency:
+ description: 'Optional: Defaults to 8'
+ format: int64
+ type: integer
+ stack-size:
+ description: 'Optional: Defaults to 10MB'
+ type: string
+ type: object
+ storage:
+ properties:
+ high-concurrency:
+ description: 'Optional: Defaults to 4'
+ format: int64
+ type: integer
+ low-concurrency:
+ description: 'Optional: Defaults to 4'
+ format: int64
+ type: integer
+ max-tasks-per-worker-high:
+ description: 'Optional: Defaults to 2000'
+ format: int64
+ type: integer
+ max-tasks-per-worker-low:
+ description: 'Optional: Defaults to 2000'
+ format: int64
+ type: integer
+ max-tasks-per-worker-normal:
+ description: 'Optional: Defaults to 2000'
+ format: int64
+ type: integer
+ normal-concurrency:
+ description: 'Optional: Defaults to 4'
+ format: int64
+ type: integer
+ stack-size:
+ description: 'Optional: Defaults to 10MB'
+ type: string
+ type: object
+ type: object
+ rocksdb:
+ description: TiKVDbConfig is the rocksdb config.
+ properties:
+ auto-tuned:
+ type: boolean
+ bytes-per-sync:
+ type: string
+ compaction-readahead-size:
+ description: 'Optional: Defaults to 0'
+ type: string
+ create-if-missing:
+ description: 'Optional: Defaults to true'
+ type: boolean
+ defaultcf:
+ description: TiKVCfConfig is the config of a cf
+ properties:
+ block-based-bloom-filter:
+ type: boolean
+ block-cache-size:
+ type: string
+ block-size:
+ type: string
+ bloom-filter-bits-per-key:
+ format: int64
+ type: integer
+ cache-index-and-filter-blocks:
+ type: boolean
+ compaction-pri:
+ format: int64
+ type: integer
+ compaction-style:
+ format: int64
+ type: integer
+ compression-per-level:
+ items:
+ type: string
+ type: array
+ disable-auto-compactions:
+ type: boolean
+ disable-block-cache:
+ type: boolean
+ dynamic-level-bytes:
+ type: boolean
+ enable-doubly-skiplist:
+ type: boolean
+ force-consistency-checks:
+ type: boolean
+ hard-pending-compaction-bytes-limit:
+ type: string
+ level0-file-num-compaction-trigger:
+ format: int64
+ type: integer
+ level0-slowdown-writes-trigger:
+ format: int64
+ type: integer
+ level0-stop-writes-trigger:
+ format: int64
+ type: integer
+ max-bytes-for-level-base:
+ type: string
+ max-bytes-for-level-multiplier:
+ format: int64
+ type: integer
+ max-compaction-bytes:
+ type: string
+ max-write-buffer-number:
+ format: int64
+ type: integer
+ min-write-buffer-number-to-merge:
+ format: int64
+ type: integer
+ num-levels:
+ format: int64
+ type: integer
+ optimize-filters-for-hits:
+ type: boolean
+ pin-l0-filter-and-index-blocks:
+ type: boolean
+ prop-keys-index-distance:
+ format: int64
+ type: integer
+ prop-size-index-distance:
+ format: int64
+ type: integer
+ read-amp-bytes-per-bit:
+ format: int64
+ type: integer
+ soft-pending-compaction-bytes-limit:
+ type: string
+ target-file-size-base:
+ type: string
+ titan:
+                      description: TiKVTitanCfConfig is the titan config.
+ properties:
+ blob-cache-size:
+ type: string
+ blob-file-compression:
+ type: string
+ blob-run-mode:
+ type: string
+ discardable-ratio:
+ format: double
+ type: number
+ max-gc-batch-size:
+ type: string
+ merge-small-file-threshold:
+ type: string
+ min-blob-size:
+ type: string
+ min-gc-batch-size:
+ type: string
+ sample-ratio:
+ format: double
+ type: number
+ type: object
+ use-bloom-filter:
+ type: boolean
+ whole-key-filtering:
+ type: boolean
+ write-buffer-size:
+ type: string
+ type: object
+ enable-pipelined-write:
+ type: boolean
+ enable-statistics:
+ description: 'Optional: Defaults to true'
+ type: boolean
+ info-log-dir:
+ type: string
+ info-log-keep-log-file-num:
+ format: int64
+ type: integer
+ info-log-max-size:
+ type: string
+ info-log-roll-time:
+ type: string
+ lockcf:
+ description: TiKVCfConfig is the config of a cf
+ properties:
+ block-based-bloom-filter:
+ type: boolean
+ block-cache-size:
+ type: string
+ block-size:
+ type: string
+ bloom-filter-bits-per-key:
+ format: int64
+ type: integer
+ cache-index-and-filter-blocks:
+ type: boolean
+ compaction-pri:
+ format: int64
+ type: integer
+ compaction-style:
+ format: int64
+ type: integer
+ compression-per-level:
+ items:
+ type: string
+ type: array
+ disable-auto-compactions:
+ type: boolean
+ disable-block-cache:
+ type: boolean
+ dynamic-level-bytes:
+ type: boolean
+ enable-doubly-skiplist:
+ type: boolean
+ force-consistency-checks:
+ type: boolean
+ hard-pending-compaction-bytes-limit:
+ type: string
+ level0-file-num-compaction-trigger:
+ format: int64
+ type: integer
+ level0-slowdown-writes-trigger:
+ format: int64
+ type: integer
+ level0-stop-writes-trigger:
+ format: int64
+ type: integer
+ max-bytes-for-level-base:
+ type: string
+ max-bytes-for-level-multiplier:
+ format: int64
+ type: integer
+ max-compaction-bytes:
+ type: string
+ max-write-buffer-number:
+ format: int64
+ type: integer
+ min-write-buffer-number-to-merge:
+ format: int64
+ type: integer
+ num-levels:
+ format: int64
+ type: integer
+ optimize-filters-for-hits:
+ type: boolean
+ pin-l0-filter-and-index-blocks:
+ type: boolean
+ prop-keys-index-distance:
+ format: int64
+ type: integer
+ prop-size-index-distance:
+ format: int64
+ type: integer
+ read-amp-bytes-per-bit:
+ format: int64
+ type: integer
+ soft-pending-compaction-bytes-limit:
+ type: string
+ target-file-size-base:
+ type: string
+ titan:
+                      description: TiKVTitanCfConfig is the titan config.
+ properties:
+ blob-cache-size:
+ type: string
+ blob-file-compression:
+ type: string
+ blob-run-mode:
+ type: string
+ discardable-ratio:
+ format: double
+ type: number
+ max-gc-batch-size:
+ type: string
+ merge-small-file-threshold:
+ type: string
+ min-blob-size:
+ type: string
+ min-gc-batch-size:
+ type: string
+ sample-ratio:
+ format: double
+ type: number
+ type: object
+ use-bloom-filter:
+ type: boolean
+ whole-key-filtering:
+ type: boolean
+ write-buffer-size:
+ type: string
+ type: object
+ max-background-jobs:
+ description: 'Optional: Defaults to 8'
+ format: int64
+ type: integer
+ max-manifest-file-size:
+ description: 'Optional: Defaults to 128MB'
+ type: string
+ max-open-files:
+ description: 'Optional: Defaults to 40960'
+ format: int64
+ type: integer
+ max-sub-compactions:
+ description: 'Optional: Defaults to 3'
+ format: int64
+ type: integer
+ max-total-wal-size:
+ description: 'Optional: Defaults to 4GB'
+ type: string
+ raftcf:
+ description: TiKVCfConfig is the config of a cf
+ properties:
+ block-based-bloom-filter:
+ type: boolean
+ block-cache-size:
+ type: string
+ block-size:
+ type: string
+ bloom-filter-bits-per-key:
+ format: int64
+ type: integer
+ cache-index-and-filter-blocks:
+ type: boolean
+ compaction-pri:
+ format: int64
+ type: integer
+ compaction-style:
+ format: int64
+ type: integer
+ compression-per-level:
+ items:
+ type: string
+ type: array
+ disable-auto-compactions:
+ type: boolean
+ disable-block-cache:
+ type: boolean
+ dynamic-level-bytes:
+ type: boolean
+ enable-doubly-skiplist:
+ type: boolean
+ force-consistency-checks:
+ type: boolean
+ hard-pending-compaction-bytes-limit:
+ type: string
+ level0-file-num-compaction-trigger:
+ format: int64
+ type: integer
+ level0-slowdown-writes-trigger:
+ format: int64
+ type: integer
+ level0-stop-writes-trigger:
+ format: int64
+ type: integer
+ max-bytes-for-level-base:
+ type: string
+ max-bytes-for-level-multiplier:
+ format: int64
+ type: integer
+ max-compaction-bytes:
+ type: string
+ max-write-buffer-number:
+ format: int64
+ type: integer
+ min-write-buffer-number-to-merge:
+ format: int64
+ type: integer
+ num-levels:
+ format: int64
+ type: integer
+ optimize-filters-for-hits:
+ type: boolean
+ pin-l0-filter-and-index-blocks:
+ type: boolean
+ prop-keys-index-distance:
+ format: int64
+ type: integer
+ prop-size-index-distance:
+ format: int64
+ type: integer
+ read-amp-bytes-per-bit:
+ format: int64
+ type: integer
+ soft-pending-compaction-bytes-limit:
+ type: string
+ target-file-size-base:
+ type: string
+ titan:
+ description: TiKVTitanCfConfig is the titan config.
+ properties:
+ blob-cache-size:
+ type: string
+ blob-file-compression:
+ type: string
+ blob-run-mode:
+ type: string
+ discardable-ratio:
+ format: double
+ type: number
+ max-gc-batch-size:
+ type: string
+ merge-small-file-threshold:
+ type: string
+ min-blob-size:
+ type: string
+ min-gc-batch-size:
+ type: string
+ sample-ratio:
+ format: double
+ type: number
+ type: object
+ use-bloom-filter:
+ type: boolean
+ whole-key-filtering:
+ type: boolean
+ write-buffer-size:
+ type: string
+ type: object
+ rate-bytes-per-sec:
+ type: string
+ rate-limiter-mode:
+ format: int64
+ type: integer
+ stats-dump-period:
+ description: 'Optional: Defaults to 10m'
+ type: string
+ titan:
+ description: TiKVTitanDBConfig is the config of a titan db.
+ properties:
+ dirname:
+ type: string
+ disable-gc:
+ type: boolean
+ enabled:
+ type: boolean
+ max-background-gc:
+ format: int64
+ type: integer
+ purge-obsolete-files-period:
+ description: The value of this field will be truncated
+ to seconds.
+ type: string
+ type: object
+ use-direct-io-for-flush-and-compaction:
+ type: boolean
+ wal-bytes-per-sync:
+ type: string
+ wal-recovery-mode:
+ description: 'Optional: Defaults to 2'
+ format: int64
+ type: integer
+ wal-size-limit:
+ type: string
+ wal-ttl-seconds:
+ format: int64
+ type: integer
+ writable-file-max-buffer-size:
+ type: string
+ writecf:
+ description: TiKVCfConfig is the config of a cf
+ properties:
+ block-based-bloom-filter:
+ type: boolean
+ block-cache-size:
+ type: string
+ block-size:
+ type: string
+ bloom-filter-bits-per-key:
+ format: int64
+ type: integer
+ cache-index-and-filter-blocks:
+ type: boolean
+ compaction-pri:
+ format: int64
+ type: integer
+ compaction-style:
+ format: int64
+ type: integer
+ compression-per-level:
+ items:
+ type: string
+ type: array
+ disable-auto-compactions:
+ type: boolean
+ disable-block-cache:
+ type: boolean
+ dynamic-level-bytes:
+ type: boolean
+ enable-doubly-skiplist:
+ type: boolean
+ force-consistency-checks:
+ type: boolean
+ hard-pending-compaction-bytes-limit:
+ type: string
+ level0-file-num-compaction-trigger:
+ format: int64
+ type: integer
+ level0-slowdown-writes-trigger:
+ format: int64
+ type: integer
+ level0-stop-writes-trigger:
+ format: int64
+ type: integer
+ max-bytes-for-level-base:
+ type: string
+ max-bytes-for-level-multiplier:
+ format: int64
+ type: integer
+ max-compaction-bytes:
+ type: string
+ max-write-buffer-number:
+ format: int64
+ type: integer
+ min-write-buffer-number-to-merge:
+ format: int64
+ type: integer
+ num-levels:
+ format: int64
+ type: integer
+ optimize-filters-for-hits:
+ type: boolean
+ pin-l0-filter-and-index-blocks:
+ type: boolean
+ prop-keys-index-distance:
+ format: int64
+ type: integer
+ prop-size-index-distance:
+ format: int64
+ type: integer
+ read-amp-bytes-per-bit:
+ format: int64
+ type: integer
+ soft-pending-compaction-bytes-limit:
+ type: string
+ target-file-size-base:
+ type: string
+ titan:
+ description: TiKVTitanCfConfig is the titan config.
+ properties:
+ blob-cache-size:
+ type: string
+ blob-file-compression:
+ type: string
+ blob-run-mode:
+ type: string
+ discardable-ratio:
+ format: double
+ type: number
+ max-gc-batch-size:
+ type: string
+ merge-small-file-threshold:
+ type: string
+ min-blob-size:
+ type: string
+ min-gc-batch-size:
+ type: string
+ sample-ratio:
+ format: double
+ type: number
+ type: object
+ use-bloom-filter:
+ type: boolean
+ whole-key-filtering:
+ type: boolean
+ write-buffer-size:
+ type: string
+ type: object
+ type: object
+ security:
+ properties:
+ ca-path:
+ type: string
+ cert-path:
+ type: string
+ cipher-file:
+ type: string
+ key-path:
+ type: string
+ override-ssl-target:
+ type: string
+ type: object
+ server:
+ description: TiKVServerConfig is the configuration of TiKV server.
+ properties:
+ concurrent-recv-snap-limit:
+ description: 'Optional: Defaults to 32'
+ format: int32
+ type: integer
+ concurrent-send-snap-limit:
+ description: 'Optional: Defaults to 32'
+ format: int32
+ type: integer
+ end-point-batch-row-limit:
+ format: int32
+ type: integer
+ end-point-enable-batch-if-possible:
+ format: int32
+ type: integer
+ end-point-recursion-limit:
+ description: 'Optional: Defaults to 1000'
+ format: int32
+ type: integer
+ end-point-request-max-handle-duration:
+ type: string
+ end-point-stream-batch-row-limit:
+ format: int32
+ type: integer
+ end-point-stream-channel-size:
+ format: int32
+ type: integer
+ grpc-compression-type:
+ description: 'Optional: Defaults to none'
+ type: string
+ grpc-concurrency:
+ description: 'Optional: Defaults to 4'
+ format: int32
+ type: integer
+ grpc-concurrent-stream:
+ description: 'Optional: Defaults to 1024'
+ format: int32
+ type: integer
+ grpc-keepalive-time:
+ description: 'Optional: Defaults to 10s'
+ type: string
+ grpc-keepalive-timeout:
+ description: 'Optional: Defaults to 3s'
+ type: string
+ grpc-memory-pool-quota:
+ description: 'Optional: Defaults to 32G'
+ type: string
+ grpc-raft-conn-num:
+ description: 'Optional: Defaults to 10'
+ format: int32
+ type: integer
+ grpc-stream-initial-window-size:
+ description: 'Optional: Defaults to 2MB'
+ type: string
+ heavy-load-threshold:
+ format: int32
+ type: integer
+ heavy-load-wait-duration:
+ description: 'Optional: Defaults to 60s'
+ type: string
+ labels:
+ type: object
+ snap-max-total-size:
+ type: string
+ snap-max-write-bytes-per-sec:
+ description: 'Optional: Defaults to 100MB'
+ type: string
+ stats-concurrency:
+ format: int32
+ type: integer
+ status-thread-pool-size:
+ description: 'Optional: Defaults to 1'
+ type: string
+ type: object
+ storage:
+ description: TiKVStorageConfig is the config of storage
+ properties:
+ block-cache:
+ description: TiKVBlockCacheConfig is the config of a block
+ cache
+ properties:
+ capacity:
+ type: string
+ high-pri-pool-ratio:
+ format: double
+ type: number
+ memory-allocator:
+ type: string
+ num-shard-bits:
+ format: int64
+ type: integer
+ shared:
+ description: 'Optional: Defaults to true'
+ type: boolean
+ strict-capacity-limit:
+ type: boolean
+ type: object
+ max-key-size:
+ format: int64
+ type: integer
+ scheduler-concurrency:
+ description: 'Optional: Defaults to 2048000'
+ format: int64
+ type: integer
+ scheduler-notify-capacity:
+ format: int64
+ type: integer
+ scheduler-pending-write-threshold:
+ description: 'Optional: Defaults to 100MB'
+ type: string
+ scheduler-worker-pool-size:
+ description: 'Optional: Defaults to 4'
+ format: int64
+ type: integer
+ type: object
+ type: object
+ configUpdateStrategy:
+ description: 'ConfigUpdateStrategy of the component. Override the
+ cluster-level updateStrategy if present Optional: Defaults to
+ cluster-level setting'
+ type: string
+ env:
+ description: List of environment variables to set in the container,
+ like v1.Container.Env. Note that the following env names cannot be
+ used and may be overridden by tidb-operator built envs. - NAMESPACE
+ - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME
+ - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED
+ - SLOW_LOG_FILE
+ items:
+ description: EnvVar represents an environment variable present
+ in a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are expanded
+ using the previous defined environment variables in the
+ container and any service environment variables. If a variable
+ cannot be resolved, the reference in the input string will
+ be unchanged. The $(VAR_NAME) syntax can be escaped with
+ a double $$, ie: $$(VAR_NAME). Escaped references will never
+ be expanded, regardless of whether the variable exists or
+ not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: EnvVarSource represents a source for the value
+ of an EnvVar.
+ properties:
+ configMapKeyRef:
+ description: Selects a key from a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its
+ key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: ObjectFieldSelector selects an APIVersioned
+ field of an object.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: ResourceFieldSelector represents container
+ resources (cpu, memory) and their output format
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor: {}
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: SecretKeySelector selects a key of a Secret.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ hostNetwork:
+ description: 'Whether Hostnetwork of the component is enabled. Override
+ the cluster-level setting if present Optional: Defaults to cluster-level
+ setting'
+ type: boolean
+ imagePullPolicy:
+ description: 'ImagePullPolicy of the component. Override the cluster-level
+ imagePullPolicy if present Optional: Defaults to cluster-level
+ setting'
+ type: string
+ limits:
+ description: 'Limits describes the maximum amount of compute resources
+ allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ maxFailoverCount:
+ description: 'MaxFailoverCount limits the max replicas that can be added
+ in failover, 0 means no failover Optional: Defaults to 3'
+ format: int32
+ type: integer
+ nodeSelector:
+ description: 'NodeSelector of the component. Merged into the cluster-level
+ nodeSelector if non-empty Optional: Defaults to cluster-level
+ setting'
+ type: object
+ podSecurityContext:
+ description: PodSecurityContext holds pod-level security attributes
+ and common container settings. Some fields are also present in
+ container.securityContext. Field values of container.securityContext
+ take precedence over field values of PodSecurityContext.
+ properties:
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
+
+ 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ format: int64
+ type: integer
+ runAsGroup:
+ description: The GID to run the entrypoint of the container
+ process. Uses runtime default if unset. May also be set in
+ SecurityContext. If set in both SecurityContext and PodSecurityContext,
+ the value specified in SecurityContext takes precedence for
+ that container.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: Indicates that the container must run as a non-root
+ user. If true, the Kubelet will validate the image at runtime
+ to ensure that it does not run as UID 0 (root) and fail to
+ start the container if it does. If unset or false, no such
+ validation will be performed. May also be set in SecurityContext. If
+ set in both SecurityContext and PodSecurityContext, the value
+ specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: The UID to run the entrypoint of the container
+ process. Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext
+ and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: SELinuxOptions are the labels to be applied to
+ the container
+ properties:
+ level:
+ description: Level is SELinux level label that applies to
+ the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies to
+ the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies to
+ the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies to
+ the container.
+ type: string
+ type: object
+ supplementalGroups:
+ description: A list of groups applied to the first process run
+ in each container, in addition to the container's primary
+ GID. If unspecified, no groups will be added to any container.
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ description: Sysctls hold a list of namespaced sysctls used
+ for the pod. Pods with unsupported sysctls (by the container
+ runtime) might fail to launch.
+ items:
+ description: Sysctl defines a kernel parameter to be set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ description: WindowsSecurityContextOptions contain Windows-specific
+ options and credentials.
+ properties:
+ gmsaCredentialSpec:
+ description: GMSACredentialSpec is where the GMSA admission
+ webhook (https://github.com/kubernetes-sigs/windows-gmsa)
+ inlines the contents of the GMSA credential spec named
+ by the GMSACredentialSpecName field. This field is alpha-level
+ and is only honored by servers that enable the WindowsGMSA
+ feature flag.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of the GMSA
+ credential spec to use. This field is alpha-level and
+ is only honored by servers that enable the WindowsGMSA
+ feature flag.
+ type: string
+ runAsUserName:
+ description: The UserName in Windows to run the entrypoint
+ of the container process. Defaults to the user specified
+ in image metadata if unspecified. May also be set in PodSecurityContext.
+ If set in both SecurityContext and PodSecurityContext,
+ the value specified in SecurityContext takes precedence.
+ This field is alpha-level and it is only honored by servers
+ that enable the WindowsRunAsUserName feature flag.
+ type: string
+ type: object
+ type: object
+ priorityClassName:
+ description: 'PriorityClassName of the component. Override the cluster-level
+ one if present Optional: Defaults to cluster-level setting'
+ type: string
+ privileged:
+ description: 'Whether to create the TiKV container in privileged mode;
+ it is highly discouraged to enable this in critical environments.
+ Optional: defaults to false'
+ type: boolean
+ replicas:
+ description: The desired ready replicas
+ format: int32
+ type: integer
+ requests:
+ description: 'Requests describes the minimum amount of compute resources
+ required. If Requests is omitted for a container, it defaults
+ to Limits if that is explicitly specified, otherwise to an implementation-defined
+ value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ schedulerName:
+ description: 'SchedulerName of the component. Override the cluster-level
+ one if present Optional: Defaults to cluster-level setting'
+ type: string
+ serviceAccount:
+ description: Specify a Service Account for tikv
+ type: string
+ storageClassName:
+ description: The storageClassName of the persistent volume for TiKV
+ data storage. Defaults to Kubernetes default storage class.
+ type: string
+ tolerations:
+ description: 'Tolerations of the component. Override the cluster-level
+ tolerations if non-empty Optional: Defaults to cluster-level setting'
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using the
+ matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ version:
+ description: 'Version of the component. Override the cluster-level
+ version if non-empty Optional: Defaults to cluster-level setting'
+ type: string
+ required:
+ - replicas
+ type: object
timezone:
description: 'Time zone of TiDB cluster Pods Optional: Defaults to UTC'
type: string
+ tlsCluster: {}
+ tolerations:
+ description: Base tolerations of TiDB cluster Pods, components may add
+ more tolerations upon this respectively
+ items:
+ description: The pod this Toleration is attached to tolerates any
+ taint that matches the triple <key,value,effect> using the matching
+ operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty, operator
+ must be Exists; this combination means to match all values and
+ all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal. Exists
+ is equivalent to wildcard for value, so that a pod can tolerate
+ all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the
+ toleration (which must be of effect NoExecute, otherwise this
+ field is ignored) tolerates the taint. By default, it is not
+ set, which means tolerate the taint forever (do not evict).
+ Zero and negative values will be treated as 0 (evict immediately)
+ by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise
+ just a regular string.
+ type: string
+ type: object
+ type: array
+ version:
+ description: TiDB cluster version
+ type: string
+ required:
+ - pd
+ - tidb
+ - tikv
+ type: object
+ type: object
+ version: v1alpha1
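For orientation, a TidbCluster manifest that exercises a few of the TiKV fields validated by the schema above might look like the sketch below. This is illustrative only: the metadata, replica counts, storage class, and config values are invented placeholders; the `config` key name and the minimal `pd`/`tidb` entries are assumptions (the CRD lists pd/tidb/tikv as required, but their full schemas appear earlier in this file).

```yaml
# Illustrative sketch only; values are placeholders, not defaults from this CRD.
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: basic                        # placeholder name
spec:
  version: v3.0.8                    # cluster-level version (placeholder)
  timezone: UTC
  pd:
    replicas: 3                      # assumed minimal pd spec
  tidb:
    replicas: 2                      # assumed minimal tidb spec
  tikv:
    replicas: 3
    maxFailoverCount: 3              # Optional: defaults to 3 per the schema
    storageClassName: local-storage  # placeholder storage class
    config:                          # field name assumed; nests the sections shown above
      server:
        grpc-concurrency: 4          # Optional: defaults to 4 per the schema
      storage:
        block-cache:
          shared: true               # Optional: defaults to true per the schema
```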
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ name: backups.pingcap.com
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.backupPath
+ description: The full path of backup data
+ name: BackupPath
+ type: string
+ - JSONPath: .status.backupSize
+ description: The data size of the backup
+ name: BackupSize
+ type: integer
+ - JSONPath: .status.commitTs
+ description: The commit ts of tidb cluster dump
+ name: CommitTS
+ type: string
+ - JSONPath: .status.timeStarted
+ description: The time at which the backup was started
+ name: Started
+ priority: 1
+ type: date
+ - JSONPath: .status.timeCompleted
+ description: The time at which the backup was completed
+ name: Completed
+ priority: 1
+ type: date
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ group: pingcap.com
+ names:
+ kind: Backup
+ plural: backups
+ shortNames:
+ - bk
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: BackupSpec contains the backup specification for a tidb cluster.
+ properties:
+ affinity:
+ description: Affinity is a group of affinity scheduling rules.
+ properties:
+ nodeAffinity:
+ description: Node affinity is a group of node affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the affinity expressions specified by this field,
+ but it may choose a node that violates one or more of the
+ expressions. The node that is most preferred is the one with
+ the greatest sum of weights, i.e. for each node that meets
+ all of the scheduling requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the sum
+ if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all
+ objects with implicit weight 0 (i.e. it's a no-op). A null
+ preferred scheduling term matches no objects (i.e. is also
+ a no-op).
+ properties:
+ preference:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed. The
+ TopologySelectorTerm type implements a subset of the
+ NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - preference
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: A node selector represents the union of the results
+ of one or more label queries over a set of nodes; that is,
+ it represents the OR of the selectors represented by the node
+ selector terms.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The
+ terms are ORed.
+ items:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed. The
+ TopologySelectorTerm type implements a subset of the
+ NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Pod affinity is a group of inter pod affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the affinity expressions specified by this field,
+ but it may choose a node that violates one or more of the
+ expressions. The node that is most preferred is the one with
+ the greatest sum of weights, i.e. for each node that meets
+ all of the scheduling requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the sum
+ if the node has pods which matches the corresponding podAffinityTerm;
+ the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this
+ field are not met at scheduling time, the pod will not be
+ scheduled onto the node. If the affinity requirements specified
+ by this field cease to be met at some point during pod execution
+ (e.g. due to a pod label update), the system may or may not
+ try to eventually evict the pod from its node. When there
+ are multiple elements, the lists of nodes corresponding to
+ each podAffinityTerm are intersected, i.e. all terms must
+ be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s)) that
+ this pod should be co-located (affinity) or not co-located
+ (anti-affinity) with, where co-located is defined as running
+ on a node whose value of the label with key <topologyKey>
+ matches that of any node on which a pod of the set of pods
+ is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over a
+ set of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator is
+ Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the
+ labelSelector applies to (matches against); null or
+ empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where
+ co-located is defined as running on a node whose value
+ of the label with key topologyKey matches that of any
+ node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Pod anti affinity is a group of inter pod anti affinity
+ scheduling rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the anti-affinity expressions specified by this
+ field, but it may choose a node that violates one or more
+ of the expressions. The node that is most preferred is the
+ one with the greatest sum of weights, i.e. for each node that
+ meets all of the scheduling requirements (resource request,
+ requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field
+ and adding "weight" to the sum if the node has pods which
+ matches the corresponding podAffinityTerm; the node(s) with
+ the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by
+ this field are not met at scheduling time, the pod will not
+ be scheduled onto the node. If the anti-affinity requirements
+ specified by this field cease to be met at some point during
+ pod execution (e.g. due to a pod label update), the system
+ may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding
+ to each podAffinityTerm are intersected, i.e. all terms must
+ be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s)) that
+ this pod should be co-located (affinity) or not co-located
+ (anti-affinity) with, where co-located is defined as running
+ on a node whose value of the label with key <topologyKey>
+ matches that of any node on which a pod of the set of pods
+ is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over a
+ set of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator is
+ Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the
+ labelSelector applies to (matches against); null or
+ empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where
+ co-located is defined as running on a node whose value
+ of the label with key topologyKey matches that of any
+ node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ backupType:
+ description: Type is the backup type for tidb cluster.
+ type: string
+ br:
+ description: BRConfig contains config for BR
+ properties:
+ checksum:
+ description: Checksum specifies whether to run checksum after backup
+ type: boolean
+ cluster:
+ description: ClusterName of backup/restore cluster
+ type: string
+ clusterNamespace:
+ description: Namespace of backup/restore cluster
+ type: string
+ concurrency:
+ description: Concurrency is the size of the thread pool on each node
+ that executes the backup task
+ format: int64
+ type: integer
+ db:
+ description: DB is the specific DB which will be backed-up or restored
+ type: string
+ logLevel:
+ description: LogLevel is the log level
+ type: string
+ onLine:
+ description: OnLine specifies whether online during restore
+ type: boolean
+ rateLimit:
+ description: RateLimit is the rate limit of the backup task, MB/s
+ per node
+ format: int32
+ type: integer
+ sendCredToTikv:
+ description: SendCredToTikv specifies whether to send credentials
+ to TiKV
+ type: boolean
+ statusAddr:
+ description: StatusAddr is the HTTP listening address for the status
+ report service. Set to empty string to disable
+ type: string
+ table:
+ description: Table is the specific table which will be backed-up
+ or restored
+ type: string
+ timeAgo:
+ description: TimeAgo is the history version of the backup task,
+ e.g. 1m, 1h
+ type: string
+ required:
+ - cluster
+ type: object
+ from:
+ description: TiDBAccessConfig defines the configuration for accessing the tidb
+ cluster
+ properties:
+ host:
+ description: Host is the tidb cluster access address
+ type: string
+ port:
+ description: Port is the port number to use for connecting to the tidb
+ cluster
+ format: int32
+ type: integer
+ secretName:
+ description: SecretName is the name of secret which stores tidb
+ cluster's password.
+ type: string
+ tlsClient: {}
+ user:
+ description: User is the user for logging in to the tidb cluster
+ type: string
+ required:
+ - host
+ - secretName
+ type: object
+ gcs:
+ description: GcsStorageProvider represents the google cloud storage
+ for storing backups.
+ properties:
+ bucket:
+ description: Bucket in which to store the backup data.
+ type: string
+ bucketAcl:
+ description: BucketAcl represents the access control list for new
+ buckets
+ type: string
+ location:
+ description: Location in which the gcs bucket is located.
+ type: string
+ objectAcl:
+ description: ObjectAcl represents the access control list for new
+ objects
+ type: string
+ path:
+ description: 'Path is the full path where the backup is saved. The
+ format of the path must be: "<bucket-name>/<path-to-backup-file>"'
+ type: string
+ projectId:
+ description: ProjectId represents the project that organizes all
+ your Google Cloud Platform resources
+ type: string
+ secretName:
+ description: SecretName is the name of secret which stores the gcs
+ service account credentials JSON.
+ type: string
+ storageClass:
+ description: StorageClass represents the storage class
+ type: string
+ required:
+ - projectId
+ - secretName
+ type: object
+ s3:
+ description: S3StorageProvider represents a S3 compliant storage for
+ storing backups.
+ properties:
+ acl:
+ description: Acl represents access control permissions for this
+ bucket
+ type: string
+ bucket:
+ description: Bucket in which to store the backup data.
+ type: string
+ endpoint:
+ description: Endpoint of S3 compatible storage service
+ type: string
+ path:
+ description: 'Path is the full path where the backup is saved. The
+ format of the path must be: "<bucket-name>/<path-to-backup-file>"'
+ type: string
+ prefix:
+ description: Prefix for the keys.
+ type: string
+ provider:
+ description: Provider represents the specific storage provider that
+ implements the S3 interface
+ type: string
+ region:
+ description: Region in which the S3 compatible bucket is located.
+ type: string
+ secretName:
+ description: SecretName is the name of secret which stores S3 compliant
+ storage access key and secret key.
+ type: string
+ sse:
+ description: SSE Server-Side Encryption.
+ type: string
+ storageClass:
+ description: StorageClass represents the storage class
+ type: string
+ required:
+ - provider
+ type: object
+ serviceAccount:
+ description: Specify service account of backup
+ type: string
+ storageClassName:
+ description: The storageClassName of the persistent volume for Backup
+ data storage. Defaults to Kubernetes default storage class.
+ type: string
+ storageSize:
+ description: StorageSize is the request storage size for backup job
+ type: string
+ tikvGCLifeTime:
+ description: TikvGCLifeTime is to specify the safe gc life time for
+ backup. The time limit during which data is retained for each GC,
+ in the format of Go Duration. When a GC happens, the current time
+ minus this value is the safe point.
+ type: string
tolerations:
- description: Base tolerations of TiDB cluster Pods, components may add
- more tolreations upon this respectively
+ description: Base tolerations of backup Pods, components may add more
+ tolerations upon this respectively
items:
description: The pod this Toleration is attached to tolerates any
taint that matches the triple <key,value,effect> using the matching
@@ -6092,13 +8593,9 @@ spec:
type: string
type: object
type: array
- version:
- description: TiDB cluster version
- type: string
- required:
- - pd
- - tidb
- - tikv
+ useKMS:
+ description: Use KMS to decrypt the secrets
+ type: boolean
type: object
type: object
version: v1alpha1
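As a rough illustration of the Backup schema above, a minimal Backup resource backed by S3-compatible storage might look like the following sketch. All names, the namespace, the bucket, and the storage size are invented placeholders; only the field names come from the CRD.

```yaml
# Illustrative sketch only; every value here is a placeholder.
apiVersion: pingcap.com/v1alpha1
kind: Backup
metadata:
  name: demo-backup                  # placeholder name
  namespace: tidb-cluster            # placeholder namespace
spec:
  backupType: full                   # placeholder backup type
  br:
    cluster: basic                   # name of the cluster to back up (placeholder)
    clusterNamespace: tidb-cluster   # namespace of the backup cluster (placeholder)
  s3:
    provider: aws                    # placeholder provider implementing the S3 interface
    region: us-west-2                # placeholder region
    bucket: my-backup-bucket         # placeholder bucket
    prefix: backups/basic            # placeholder key prefix
    secretName: s3-secret            # secret holding access key and secret key (placeholder)
  storageClassName: local-storage    # placeholder storage class
  storageSize: 10Gi                  # requested storage size for the backup job (placeholder)
```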
@@ -6107,37 +8604,26 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
- name: backups.pingcap.com
+ name: restores.pingcap.com
spec:
additionalPrinterColumns:
- - JSONPath: .status.backupPath
- description: The full path of backup data
- name: BackupPath
- type: string
- - JSONPath: .status.backupSize
- description: The data size of the backup
- name: BackupSize
- type: integer
- - JSONPath: .status.commitTs
- description: The commit ts of tidb cluster dump
- name: CommitTS
- type: string
- JSONPath: .status.timeStarted
description: The time at which the backup was started
name: Started
- priority: 1
type: date
- JSONPath: .status.timeCompleted
- description: The time at which the backup was completed
+ description: The time at which the restore was completed
name: Completed
- priority: 1
+ type: date
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
type: date
group: pingcap.com
names:
- kind: Backup
- plural: backups
+ kind: Restore
+ plural: restores
shortNames:
- - bk
+ - rt
scope: Namespaced
validation:
openAPIV3Schema:
@@ -6153,234 +8639,608 @@ spec:
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
- description: BackupSpec contains the backup specification for a tidb cluster.
+ description: RestoreSpec contains the specification for a restore of a tidb
+ cluster backup.
properties:
+ affinity:
+ description: Affinity is a group of affinity scheduling rules.
+ properties:
+ nodeAffinity:
+ description: Node affinity is a group of node affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the affinity expressions specified by this field,
+ but it may choose a node that violates one or more of the
+ expressions. The node that is most preferred is the one with
+ the greatest sum of weights, i.e. for each node that meets
+ all of the scheduling requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the sum
+ if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all
+ objects with implicit weight 0 (i.e. it's a no-op). A null
+ preferred scheduling term matches no objects (i.e. is also
+ a no-op).
+ properties:
+ preference:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed. The
+ TopologySelectorTerm type implements a subset of the
+ NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - preference
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: A node selector represents the union of the results
+ of one or more label queries over a set of nodes; that is,
+ it represents the OR of the selectors represented by the node
+ selector terms.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The
+ terms are ORed.
+ items:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed. The
+ TopologySelectorTerm type implements a subset of the
+ NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the
+ operator is In or NotIn, the values array
+ must be non-empty. If the operator is Exists
+ or DoesNotExist, the values array must be
+ empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will
+ be interpreted as an integer. This array is
+ replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Pod affinity is a group of inter pod affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the affinity expressions specified by this field,
+ but it may choose a node that violates one or more of the
+ expressions. The node that is most preferred is the one with
+ the greatest sum of weights, i.e. for each node that meets
+ all of the scheduling requirements (resource request, requiredDuringScheduling
+ affinity expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the sum
+ if the node has pods which matches the corresponding podAffinityTerm;
+ the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this
+ field are not met at scheduling time, the pod will not be
+ scheduled onto the node. If the affinity requirements specified
+ by this field cease to be met at some point during pod execution
+ (e.g. due to a pod label update), the system may or may not
+ try to eventually evict the pod from its node. When there
+ are multiple elements, the lists of nodes corresponding to
+ each podAffinityTerm are intersected, i.e. all terms must
+ be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s)) that
+ this pod should be co-located (affinity) or not co-located
+ (anti-affinity) with, where co-located is defined as running
+ on a node whose value of the label with key <topologyKey>
+ matches that of any node on which a pod of the set of pods
+ is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over a
+ set of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator is
+ Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the
+ labelSelector applies to (matches against); null or
+ empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where
+ co-located is defined as running on a node whose value
+ of the label with key topologyKey matches that of any
+ node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Pod anti affinity is a group of inter pod anti affinity
+ scheduling rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes
+ that satisfy the anti-affinity expressions specified by this
+ field, but it may choose a node that violates one or more
+ of the expressions. The node that is most preferred is the
+ one with the greatest sum of weights, i.e. for each node that
+ meets all of the scheduling requirements (resource request,
+ requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field
+ and adding "weight" to the sum if the node has pods which
+ matches the corresponding podAffinityTerm; the node(s) with
+ the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by
+ this field are not met at scheduling time, the pod will not
+ be scheduled onto the node. If the anti-affinity requirements
+ specified by this field cease to be met at some point during
+ pod execution (e.g. due to a pod label update), the system
+ may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding
+ to each podAffinityTerm are intersected, i.e. all terms must
+ be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s)) that
+ this pod should be co-located (affinity) or not co-located
+ (anti-affinity) with, where co-located is defined as running
+ on a node whose value of the label with key <topologyKey>
+ matches that of any node on which a pod of the set of pods
+ is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over a
+ set of resources. The result of matchLabels and matchExpressions
+ are ANDed. An empty label selector matches all objects.
+ A null label selector matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In,
+ NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator is
+ Exists or DoesNotExist, the values array must
+ be empty. This array is replaced during a
+ strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field
+ is "key", the operator is "In", and the values array
+ contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the
+ labelSelector applies to (matches against); null or
+ empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where
+ co-located is defined as running on a node whose value
+ of the label with key topologyKey matches that of any
+ node on which any of the selected pods is running. Empty
+ topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
backupType:
description: Type is the backup type for tidb cluster.
type: string
br:
description: BRConfig contains config for BR
properties:
- ca:
- description: CA is the CA certificate path for TLS connection
- type: string
- cert:
- description: Cert is the certificate path for TLS connection
- type: string
checksum:
description: Checksum specifies whether to run checksum after backup
type: boolean
- concurrency:
- description: Concurrency is the size of thread pool on each node
- that execute the backup task
- format: int64
- type: integer
- db:
- description: DB is the specific DB which will be backed-up or restored
- type: string
- key:
- description: Key is the private key path for TLS connection
- type: string
- logLevel:
- description: LogLevel is the log level
- type: string
- onLine:
- description: OnLine specifies whether online during restore
- type: boolean
- pd:
- description: PDAddress is the PD address of the tidb cluster
- type: string
- rateLimit:
- description: RateLimit is the rate limit of the backup task, MB/s
- per node
- format: int32
- type: integer
- sendCredToTikv:
- description: SendCredToTikv specifies whether to send credentials
- to TiKV
- type: boolean
- statusAddr:
- description: StatusAddr is the HTTP listening address for the status
- report service. Set to empty string to disable
- type: string
- table:
- description: Table is the specific table which will be backed-up
- or restored
- type: string
- timeAgo:
- description: TimeAgo is the history version of the backup task,
- e.g. 1m, 1h
- type: string
- required:
- - pd
- type: object
- from:
- description: TiDBAccessConfig defines the configuration for access tidb
- cluster
- properties:
- host:
- description: Host is the tidb cluster access address
- type: string
- port:
- description: Port is the port number to use for connecting tidb
- cluster
- format: int32
- type: integer
- secretName:
- description: SecretName is the name of secret which stores tidb
- cluster's password.
- type: string
- user:
- description: User is the user for login tidb cluster
- type: string
- required:
- - host
- - secretName
- type: object
- gcs:
- description: GcsStorageProvider represents the google cloud storage
- for storing backups.
- properties:
- bucket:
- description: Bucket in which to store the backup data.
- type: string
- bucketAcl:
- description: BucketAcl represents the access control list for new
- buckets
+ cluster:
+ description: ClusterName of backup/restore cluster
type: string
- location:
- description: Location in which the gcs bucket is located.
- type: string
- objectAcl:
- description: ObjectAcl represents the access control list for new
- objects
- type: string
- path:
- description: 'Path is the full path where the backup is saved. The
- format of the path must be: "/"'
- type: string
- projectId:
- description: ProjectId represents the project that organizes all
- your Google Cloud Platform resources
- type: string
- secretName:
- description: SecretName is the name of secret which stores the gcs
- service account credentials JSON .
- type: string
- storageClass:
- description: StorageClass represents the storage class
- type: string
- required:
- - projectId
- - secretName
- type: object
- s3:
- description: S3StorageProvider represents a S3 compliant storage for
- storing backups.
- properties:
- acl:
- description: Acl represents access control permissions for this
- bucket
- type: string
- bucket:
- description: Bucket in which to store the backup data.
- type: string
- endpoint:
- description: Endpoint of S3 compatible storage service
- type: string
- path:
- description: 'Path is the full path where the backup is saved. The
- format of the path must be: "/"'
- type: string
- prefix:
- description: Prefix for the keys.
- type: string
- provider:
- description: Provider represents the specific storage provider that
- implements the S3 interface
- type: string
- region:
- description: Region in which the S3 compatible bucket is located.
- type: string
- secretName:
- description: SecretName is the name of secret which stores S3 compliant
- storage access key and secret key.
- type: string
- sse:
- description: SSE Sever-Side Encryption.
- type: string
- storageClass:
- description: StorageClass represents the storage class
- type: string
- required:
- - provider
- - secretName
- type: object
- storageClassName:
- description: The storageClassName of the persistent volume for Backup
- data storage. Defaults to Kubernetes default storage class.
- type: string
- storageSize:
- description: StorageSize is the request storage size for backup job
- type: string
- type: object
- type: object
- version: v1alpha1
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- creationTimestamp: null
- name: restores.pingcap.com
-spec:
- additionalPrinterColumns:
- - JSONPath: .status.timeStarted
- description: The time at which the backup was started
- name: Started
- type: date
- - JSONPath: .status.timeCompleted
- description: The time at which the restore was completed
- name: Completed
- type: date
- group: pingcap.com
- names:
- kind: Restore
- plural: restores
- shortNames:
- - rt
- scope: Namespaced
- validation:
- openAPIV3Schema:
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- spec:
- description: RestoreSpec contains the specification for a restore of a tidb
- cluster backup.
- properties:
- backupType:
- description: Type is the backup type for tidb cluster.
- type: string
- br:
- description: BRConfig contains config for BR
- properties:
- ca:
- description: CA is the CA certificate path for TLS connection
+ clusterNamespace:
+ description: Namespace of backup/restore cluster
type: string
- cert:
- description: Cert is the certificate path for TLS connection
- type: string
- checksum:
- description: Checksum specifies whether to run checksum after backup
- type: boolean
concurrency:
description: Concurrency is the size of thread pool on each node
that execute the backup task
@@ -6389,18 +9249,12 @@ spec:
db:
description: DB is the specific DB which will be backed-up or restored
type: string
- key:
- description: Key is the private key path for TLS connection
- type: string
logLevel:
description: LogLevel is the log level
type: string
onLine:
description: OnLine specifies whether online during restore
type: boolean
- pd:
- description: PDAddress is the PD address of the tidb cluster
- type: string
rateLimit:
description: RateLimit is the rate limit of the backup task, MB/s
per node
@@ -6423,7 +9277,7 @@ spec:
e.g. 1m, 1h
type: string
required:
- - pd
+ - cluster
type: object
gcs:
description: GcsStorageProvider represents the google cloud storage
@@ -6502,8 +9356,10 @@ spec:
type: string
required:
- provider
- - secretName
type: object
+ serviceAccount:
+ description: Specify service account of restore
+ type: string
storageClassName:
description: The storageClassName of the persistent volume for Restore
data storage. Defaults to Kubernetes default storage class.
@@ -6511,6 +9367,12 @@ spec:
storageSize:
description: StorageSize is the request storage size for backup job
type: string
+ tikvGCLifeTime:
+ description: TikvGCLifeTime is to specify the safe gc life time for
+ restore. The time limit during which data is retained for each GC,
+ in the format of Go Duration. When a GC happens, the current time
+ minus this value is the safe point.
+ type: string
to:
description: TiDBAccessConfig defines the configuration for access tidb
cluster
@@ -6527,6 +9389,7 @@ spec:
description: SecretName is the name of secret which stores tidb
cluster's password.
type: string
+ tlsClient: {}
user:
description: User is the user for login tidb cluster
type: string
@@ -6534,6 +9397,50 @@ spec:
- host
- secretName
type: object
+ tolerations:
+ description: Base tolerations of restore Pods, components may add more
+ tolerations upon this respectively
+ items:
+ description: The pod this Toleration is attached to tolerates any
+ taint that matches the triple <key,value,effect> using the matching
+ operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty, operator
+ must be Exists; this combination means to match all values and
+ all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal. Exists
+ is equivalent to wildcard for value, so that a pod can tolerate
+ all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the
+ toleration (which must be of effect NoExecute, otherwise this
+ field is ignored) tolerates the taint. By default, it is not
+ set, which means tolerate the taint forever (do not evict).
+ Zero and negative values will be treated as 0 (evict immediately)
+ by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise
+ just a regular string.
+ type: string
+ type: object
+ type: array
+ useKMS:
+ description: Use KMS to decrypt the secrets
+ type: boolean
type: object
type: object
version: v1alpha1
@@ -6563,6 +9470,9 @@ spec:
name: LastBackupTime
priority: 1
type: date
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
group: pingcap.com
names:
kind: BackupSchedule
@@ -6591,22 +9501,633 @@ spec:
description: BackupSpec contains the backup specification for a tidb
cluster.
properties:
+ affinity:
+ description: Affinity is a group of affinity scheduling rules.
+ properties:
+ nodeAffinity:
+ description: Node affinity is a group of node affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the affinity expressions specified
+ by this field, but it may choose a node that violates
+ one or more of the expressions. The node that is most
+ preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements
+ (resource request, requiredDuringScheduling affinity expressions,
+ etc.), compute a sum by iterating through the elements
+ of this field and adding "weight" to the sum if the node
+ matches the corresponding matchExpressions; the node(s)
+ with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches
+ all objects with implicit weight 0 (i.e. it's a no-op).
+ A null preferred scheduling term matches no objects
+ (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed.
+ The TopologySelectorTerm type implements a subset
+ of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists, DoesNotExist, Gt, and
+ Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator
+ is Exists or DoesNotExist, the values
+ array must be empty. If the operator is
+ Gt or Lt, the values array must have a
+ single element, which will be interpreted
+ as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists, DoesNotExist, Gt, and
+ Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator
+ is Exists or DoesNotExist, the values
+ array must be empty. If the operator is
+ Gt or Lt, the values array must have a
+ single element, which will be interpreted
+ as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - preference
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: A node selector represents the union of the
+ results of one or more label queries over a set of nodes;
+ that is, it represents the OR of the selectors represented
+ by the node selector terms.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: A null or empty node selector term matches
+ no objects. The requirements of them are ANDed.
+ The TopologySelectorTerm type implements a subset
+ of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists, DoesNotExist, Gt, and
+ Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator
+ is Exists or DoesNotExist, the values
+ array must be empty. If the operator is
+ Gt or Lt, the values array must have a
+ single element, which will be interpreted
+ as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: A node selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists, DoesNotExist, Gt, and
+ Lt.
+ type: string
+ values:
+ description: An array of string values.
+ If the operator is In or NotIn, the values
+ array must be non-empty. If the operator
+ is Exists or DoesNotExist, the values
+ array must be empty. If the operator is
+ Gt or Lt, the values array must have a
+ single element, which will be interpreted
+ as an integer. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Pod affinity is a group of inter pod affinity scheduling
+ rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the affinity expressions specified
+ by this field, but it may choose a node that violates
+ one or more of the expressions. The node that is most
+ preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements
+ (resource request, requiredDuringScheduling affinity expressions,
+ etc.), compute a sum by iterating through the elements
+ of this field and adding "weight" to the sum if the node
+ has pods which matches the corresponding podAffinityTerm;
+ the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or
+ not co-located (anti-affinity) with, where co-located
+ is defined as running on a node whose value of the
+ label with key <topologyKey> matches that of any
+ node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query
+ over a set of resources. The result of matchLabels
+ and matchExpressions are ANDed. An empty label
+ selector matches all objects. A null label selector
+ matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of
+ label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a
+ key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified
+ namespaces, where co-located is defined as running
+ on a node whose value of the label with key
+ topologyKey matches that of any node on which
+ any of the selected pods is running. Empty topologyKey
+ is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this
+ field are not met at scheduling time, the pod will not
+ be scheduled onto the node. If the affinity requirements
+ specified by this field cease to be met at some point
+ during pod execution (e.g. due to a pod label update),
+ the system may or may not try to eventually evict the
+ pod from its node. When there are multiple elements, the
+ lists of nodes corresponding to each podAffinityTerm are
+ intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Pod anti affinity is a group of inter pod anti
+ affinity scheduling rules.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods
+ to nodes that satisfy the anti-affinity expressions specified
+ by this field, but it may choose a node that violates
+ one or more of the expressions. The node that is most
+ preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements
+ (resource request, requiredDuringScheduling anti-affinity
+ expressions, etc.), compute a sum by iterating through
+ the elements of this field and adding "weight" to the
+ sum if the node has pods which matches the corresponding
+ podAffinityTerm; the node(s) with the highest sum are
+ the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or
+ not co-located (anti-affinity) with, where co-located
+ is defined as running on a node whose value of the
+ label with key <topologyKey> matches that of any
+ node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query
+ over a set of resources. The result of matchLabels
+ and matchExpressions are ANDed. An empty label
+ selector matches all objects. A null label selector
+ matches no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of
+ label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: A label selector requirement
+ is a selector that contains values, a
+ key, and an operator that relates the
+ key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
+ If the operator is Exists or DoesNotExist,
+ the values array must be empty. This
+ array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is
+ "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified
+ namespaces, where co-located is defined as running
+ on a node whose value of the label with key
+ topologyKey matches that of any node on which
+ any of the selected pods is running. Empty topologyKey
+ is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding
+ podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - weight
+ - podAffinityTerm
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified
+ by this field are not met at scheduling time, the pod
+ will not be scheduled onto the node. If the anti-affinity
+ requirements specified by this field cease to be met at
+ some point during pod execution (e.g. due to a pod label
+ update), the system may or may not try to eventually evict
+ the pod from its node. When there are multiple elements,
+ the lists of nodes corresponding to each podAffinityTerm
+ are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching
+ the labelSelector relative to the given namespace(s))
+ that this pod should be co-located (affinity) or not
+ co-located (anti-affinity) with, where co-located is
+ defined as running on a node whose value of the label
+ with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label selector is a label query over
+ a set of resources. The result of matchLabels and
+ matchExpressions are ANDed. An empty label selector
+ matches all objects. A null label selector matches
+ no objects.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: A label selector requirement is
+ a selector that contains values, a key, and
+ an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's
+ relationship to a set of values. Valid
+ operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty. If
+ the operator is Exists or DoesNotExist,
+ the values array must be empty. This array
+ is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ description: matchLabels is a map of {key,value}
+ pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions,
+ whose key field is "key", the operator is "In",
+ and the values array contains only "value".
+ The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces
+ the labelSelector applies to (matches against);
+ null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity)
+ or not co-located (anti-affinity) with the pods
+ matching the labelSelector in the specified namespaces,
+ where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches
+ that of any node on which any of the selected pods
+ is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
backupType:
description: Type is the backup type for tidb cluster.
type: string
br:
description: BRConfig contains config for BR
properties:
- ca:
- description: CA is the CA certificate path for TLS connection
- type: string
- cert:
- description: Cert is the certificate path for TLS connection
- type: string
checksum:
description: Checksum specifies whether to run checksum after
backup
type: boolean
+ cluster:
+ description: ClusterName of backup/restore cluster
+ type: string
+ clusterNamespace:
+ description: Namespace of backup/restore cluster
+ type: string
concurrency:
description: Concurrency is the size of thread pool on each
node that execute the backup task
@@ -6616,18 +10137,12 @@ spec:
description: DB is the specific DB which will be backed-up or
restored
type: string
- key:
- description: Key is the private key path for TLS connection
- type: string
logLevel:
description: LogLevel is the log level
type: string
onLine:
description: OnLine specifies whether online during restore
type: boolean
- pd:
- description: PDAddress is the PD address of the tidb cluster
- type: string
rateLimit:
description: RateLimit is the rate limit of the backup task,
MB/s per node
@@ -6650,7 +10165,7 @@ spec:
e.g. 1m, 1h
type: string
required:
- - pd
+ - cluster
type: object
from:
description: TiDBAccessConfig defines the configuration for access
@@ -6668,6 +10183,7 @@ spec:
description: SecretName is the name of secret which stores tidb
cluster's password.
type: string
+ tlsClient: {}
user:
description: User is the user for login tidb cluster
type: string
@@ -6752,8 +10268,10 @@ spec:
type: string
required:
- provider
- - secretName
type: object
+ serviceAccount:
+ description: Specify service account of backup
+ type: string
storageClassName:
description: The storageClassName of the persistent volume for Backup
data storage. Defaults to Kubernetes default storage class.
@@ -6762,6 +10280,56 @@ spec:
description: StorageSize is the request storage size for backup
job
type: string
+ tikvGCLifeTime:
+ description: TikvGCLifeTime is to specify the safe gc life time
+ for backup. The time limit during which data is retained for each
+ GC, in the format of Go Duration. When a GC happens, the current
+ time minus this value is the safe point.
+ type: string
+ tolerations:
+ description: Base tolerations of backup Pods, components may add
+ more tolerations upon this respectively
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using the
+ matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty
+ means match all taint effects. When specified, allowed values
+ are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the
+ value. Valid operators are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard for value, so that
+ a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time
+ the toleration (which must be of effect NoExecute, otherwise
+ this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do
+ not evict). Zero and negative values will be treated as
+ 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ useKMS:
+ description: Use KMS to decrypt the secrets
+ type: boolean
type: object
maxBackups:
description: MaxBackups is to specify how many backups we want to keep
@@ -6920,6 +10488,9 @@ spec:
name: Phase
priority: 1
type: string
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
group: pingcap.com
names:
kind: TidbInitializer
@@ -7005,6 +10576,26 @@ metadata:
creationTimestamp: null
name: tidbclusterautoscalers.pingcap.com
spec:
+ additionalPrinterColumns:
+ - JSONPath: .spec.tidb.maxReplicas
+ description: The maximal replicas of TiDB
+ name: TiDB-MaxReplicas
+ type: integer
+ - JSONPath: .spec.tidb.minReplicas
+ description: The minimal replicas of TiDB
+ name: TiDB-MinReplicas
+ type: integer
+ - JSONPath: .spec.tikv.maxReplicas
+ description: The maximal replicas of TiKV
+ name: TiKV-MaxReplicas
+ type: integer
+ - JSONPath: .spec.tikv.minReplicas
+ description: The minimal replicas of TiKV
+ name: TiKV-MinReplicas
+ type: integer
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
group: pingcap.com
names:
kind: TidbClusterAutoScaler
@@ -7046,6 +10637,19 @@ spec:
the pd could provide it. MetricsUrl represents the url to fetch the
metrics info
type: string
+ monitor:
+ description: TidbMonitorRef reference to a TidbMonitor
+ properties:
+ name:
+ description: Name is the name of TidbMonitor object
+ type: string
+ namespace:
+ description: Namespace is the namespace that TidbMonitor object
+ locates, default to the same namespace with TidbClusterAutoScaler
+ type: string
+ required:
+ - name
+ type: object
tidb:
description: TidbAutoScalerSpec describes the spec for tidb auto-scaling
properties:
@@ -7067,6 +10671,10 @@ spec:
metric will be set to 80% average CPU utilization.
items: {}
type: array
+ metricsTimeDuration:
+ description: MetricsTimeDuration describe the Time duration to be
+ queried in the Prometheus
+ type: string
minReplicas:
description: minReplicas is the lower limit for the number of replicas
to which the autoscaler can scale down. It defaults to 1 pod.
@@ -7079,12 +10687,26 @@ spec:
will be set to 500
format: int32
type: integer
+ scaleInThreshold:
+ description: ScaleInThreshold describe the consecutive threshold
+ for the auto-scaling, if the consecutive counts of the scale-in
+ result in auto-scaling reach this number, the auto-scaling would
+ be performed. If not set, the default value is 5.
+ format: int32
+ type: integer
scaleOutIntervalSeconds:
description: ScaleOutIntervalSeconds represents the duration seconds
between each auto-scaling-out If not set, the default ScaleOutIntervalSeconds
will be set to 300
format: int32
type: integer
+ scaleOutThreshold:
+ description: ScaleOutThreshold describe the consecutive threshold
+ for the auto-scaling, if the consecutive counts of the scale-out
+ result in auto-scaling reach this number, the auto-scaling would
+ be performed. If not set, the default value is 3.
+ format: int32
+ type: integer
required:
- maxReplicas
type: object
@@ -7109,6 +10731,10 @@ spec:
metric will be set to 80% average CPU utilization.
items: {}
type: array
+ metricsTimeDuration:
+ description: MetricsTimeDuration describe the Time duration to be
+ queried in the Prometheus
+ type: string
minReplicas:
description: minReplicas is the lower limit for the number of replicas
to which the autoscaler can scale down. It defaults to 1 pod.
@@ -7121,18 +10747,129 @@ spec:
will be set to 500
format: int32
type: integer
+ scaleInThreshold:
+ description: ScaleInThreshold describe the consecutive threshold
+ for the auto-scaling, if the consecutive counts of the scale-in
+ result in auto-scaling reach this number, the auto-scaling would
+ be performed. If not set, the default value is 5.
+ format: int32
+ type: integer
scaleOutIntervalSeconds:
description: ScaleOutIntervalSeconds represents the duration seconds
between each auto-scaling-out If not set, the default ScaleOutIntervalSeconds
will be set to 300
format: int32
type: integer
+ scaleOutThreshold:
+ description: ScaleOutThreshold describe the consecutive threshold
+ for the auto-scaling, if the consecutive counts of the scale-out
+ result in auto-scaling reach this number, the auto-scaling would
+ be performed. If not set, the default value is 3.
+ format: int32
+ type: integer
required:
- maxReplicas
type: object
required:
- cluster
type: object
- status: {}
+ status:
+ description: TidbClusterAutoSclaerStatus describe the whole status
+ properties:
+ tidb:
+ description: TidbAutoScalerStatus describe the auto-scaling status of
+ tidb
+ properties:
+ currentReplicas:
+ description: CurrentReplicas describes the current replicas for
+ the component(tidb/tikv)
+ format: int32
+ type: integer
+ lastAutoScalingTimestamp:
+ description: Time is a wrapper around time.Time which supports correct
+ marshaling to YAML and JSON. Wrappers are provided for many of
+ the factory methods that the time package offers.
+ format: date-time
+ type: string
+ metrics:
+ description: MetricsStatusList describes the metrics status in the
+ last auto-scaling reconciliation
+ items:
+ description: MetricsStatus describe the basic metrics status in
+ the last auto-scaling reconciliation
+ properties:
+ currentValue:
+ description: CurrentValue indicates the value calculated in
+ the last auto-scaling reconciliation
+ type: string
+ name:
+ description: Name indicates the metrics name
+ type: string
+ thresholdValue:
+ description: TargetValue indicates the threshold value for
+ this metrics in auto-scaling
+ type: string
+ required:
+ - name
+ - currentValue
+ - thresholdValue
+ type: object
+ type: array
+ recommendedReplicas:
+ description: RecommendedReplicas describes the calculated replicas
+ in the last auto-scaling reconciliation for the component(tidb/tikv)
+ format: int32
+ type: integer
+ required:
+ - currentReplicas
+ type: object
+ tikv:
+ description: TikvAutoScalerStatus describe the auto-scaling status of
+ tikv
+ properties:
+ currentReplicas:
+ description: CurrentReplicas describes the current replicas for
+ the component(tidb/tikv)
+ format: int32
+ type: integer
+ lastAutoScalingTimestamp:
+ description: Time is a wrapper around time.Time which supports correct
+ marshaling to YAML and JSON. Wrappers are provided for many of
+ the factory methods that the time package offers.
+ format: date-time
+ type: string
+ metrics:
+ description: MetricsStatusList describes the metrics status in the
+ last auto-scaling reconciliation
+ items:
+ description: MetricsStatus describe the basic metrics status in
+ the last auto-scaling reconciliation
+ properties:
+ currentValue:
+ description: CurrentValue indicates the value calculated in
+ the last auto-scaling reconciliation
+ type: string
+ name:
+ description: Name indicates the metrics name
+ type: string
+ thresholdValue:
+ description: TargetValue indicates the threshold value for
+ this metrics in auto-scaling
+ type: string
+ required:
+ - name
+ - currentValue
+ - thresholdValue
+ type: object
+ type: array
+ recommendedReplicas:
+ description: RecommendedReplicas describes the calculated replicas
+ in the last auto-scaling reconciliation for the component(tidb/tikv)
+ format: int32
+ type: integer
+ required:
+ - currentReplicas
+ type: object
+ type: object
type: object
version: v1alpha1
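Not part of the diff itself, but for orientation: the new `TidbClusterAutoScaler` fields documented in the schema above (the `monitor` reference, `metricsTimeDuration`, and the scale-in/scale-out thresholds) compose into a CR roughly like the following minimal sketch. The object and cluster names are placeholders, and the assumption that `spec.cluster` takes a `name` field mirrors the `monitor` reference shown above.

``` yaml
# Hypothetical example (not part of this change): a minimal TidbClusterAutoScaler
# exercising the fields described in the schema above.
apiVersion: pingcap.com/v1alpha1
kind: TidbClusterAutoScaler
metadata:
  name: auto-scaler-demo
spec:
  cluster:
    name: basic                # assumed: reference to the target TidbCluster
  monitor:
    name: basic-monitor        # TidbMonitorRef; namespace defaults to this CR's namespace
  tikv:
    minReplicas: 3
    maxReplicas: 6
    metricsTimeDuration: "3m"  # time window queried from Prometheus
    scaleInThreshold: 5        # consecutive scale-in results before acting (default 5)
    scaleOutThreshold: 3       # consecutive scale-out results before acting (default 3)
  tidb:
    minReplicas: 2
    maxReplicas: 4
```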
diff --git a/manifests/dm/README.md b/manifests/dm/README.md
new file mode 100644
index 0000000000..5fb0b9ee9f
--- /dev/null
+++ b/manifests/dm/README.md
@@ -0,0 +1,48 @@
+---
+title: Deploy DM on Kubernetes
+summary: Deploy DM on Kubernetes
+category: how-to
+---
+
+# Deploy DM on Kubernetes
+
+This document describes how to deploy DM with the new HA architecture using the YAML manifests in this directory.
+
+## Deploy dm-master
+
+If necessary, update the RPC configuration in `master/config/config.toml`.
+
+{{< copyable "shell-regular" >}}
+
+``` shell
+kubectl apply -k master -n <namespace>
+```
+
+> **Note:**
+>
+> - `3` replicas are deployed by default.
+> - `storageClassName` is set to `local-storage` by default.
+
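+As a quick sanity check after the apply, the sketch below lists what was created. The label and Service names come from `master/dm-master.yaml` in this directory; `<namespace>` is the namespace used above.
+
+{{< copyable "shell-regular" >}}
+
+``` shell
+# dm-master Pods and Services created by the manifests above
+kubectl get pods,svc -n <namespace> -l app.kubernetes.io/component=dm-master
+# optionally forward the client port locally, e.g. to reach dm-master with dmctl
+kubectl port-forward -n <namespace> svc/dm-master 8261:8261
+```
+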
+## Deploy dm-worker
+
+- If you only need to use DM for incremental data migration, there is no need to create a PVC for dm-worker; just deploy it with the command below:
+
+ {{< copyable "shell-regular" >}}
+
+ ``` shell
+ kubectl apply -k worker/base -n <namespace>
+ ```
+
+- If you need to use DM for both full and incremental data migration, you have to create a PVC for dm-worker; deploy it with the command below:
+
+ {{< copyable "shell-regular" >}}
+
+ ``` shell
+ kubectl apply -k worker/overlays/full -n <namespace>
+ ```
+
+> **Note:**
+>
+> - `3` replicas are deployed by default.
+> - `storageClassName` is set to `local-storage` for PVC by default.
+> - If PVCs are created, they are mounted to the `/data` directory.
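+
+To confirm that the dm-worker Pods (and the PVCs, if the `full` overlay is used) are up, a minimal check could look like the following. It assumes dm-worker Pods carry an `app.kubernetes.io/component: dm-worker` label analogous to dm-master; adjust the selector if the label differs.
+
+{{< copyable "shell-regular" >}}
+
+``` shell
+# dm-worker Pods (label assumed, see note above)
+kubectl get pods -n <namespace> -l app.kubernetes.io/component=dm-worker
+# PVCs created for dm-worker data (full overlay only)
+kubectl get pvc -n <namespace>
+```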
diff --git a/manifests/dm/master/config/config.toml b/manifests/dm/master/config/config.toml
new file mode 100644
index 0000000000..b2e265aa59
--- /dev/null
+++ b/manifests/dm/master/config/config.toml
@@ -0,0 +1,14 @@
+# rpc configuration
+#
+# The rpc timeout is a positive number plus a time unit. Golang standard time
+# units are supported: "ns", "us", "ms", "s", "m", "h". Provide a proper
+# rpc timeout according to your usage scenario.
+rpc-timeout = "30s"
+# The rpc limiter controls how frequently events are allowed to happen.
+# It implements a "token bucket" of size `rpc-rate-limit`, initially full and
+# refilled at rate `rpc-rate-limit` tokens per second. Note that `rpc-rate-limit`
+# is a float64, so remember to add a decimal point and one trailing 0 if its
+# literal value happens to be an integer.
+rpc-rate-limit = 10.0
+rpc-rate-burst = 40
+
diff --git a/manifests/dm/master/dm-master.yaml b/manifests/dm/master/dm-master.yaml
new file mode 100644
index 0000000000..c19c9a4e2d
--- /dev/null
+++ b/manifests/dm/master/dm-master.yaml
@@ -0,0 +1,148 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/component: dm-master
+ name: dm-master
+spec:
+ podManagementPolicy: Parallel
+ replicas: 3
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: dm-master
+ serviceName: dm-master-peer
+ template:
+ metadata:
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "8261"
+ prometheus.io/scrape: "true"
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/component: dm-master
+ spec:
+ affinity: {}
+ containers:
+ - command:
+ - /dm-master
+ - --data-dir=/data
+ - --config=/etc/config/config.toml
+ - --name=$(MY_POD_NAME)
+ - --master-addr=:8261
+ - --advertise-addr=$(MY_POD_NAME).$(PEER_SERVICE_NAME).$(NAMESPACE):8261
+ - --peer-urls=:8291
+ - --advertise-peer-urls=http://$(MY_POD_NAME).$(PEER_SERVICE_NAME).$(NAMESPACE):8291
+ - --initial-cluster=dm-master-0=http://dm-master-0.$(PEER_SERVICE_NAME).$(NAMESPACE):8291,dm-master-1=http://dm-master-1.$(PEER_SERVICE_NAME).$(NAMESPACE):8291,dm-master-2=http://dm-master-2.$(PEER_SERVICE_NAME).$(NAMESPACE):8291
+ env:
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: PEER_SERVICE_NAME
+ value: dm-master-peer
+ - name: SERVICE_NAME
+ value: dm-master
+ - name: TZ
+ value: UTC
+ image: pingcap/dm:ha-alpha
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 8
+ httpGet:
+ path: /status
+ port: 8261
+ scheme: HTTP
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /status
+ port: 8261
+ scheme: HTTP
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ name: master
+ ports:
+ - containerPort: 8291
+ name: server
+ protocol: TCP
+ - containerPort: 8261
+ name: client
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /data
+ name: data
+ - mountPath: /etc/config
+ name: config
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - configMap:
+ name: dm-master-config
+ name: config
+ updateStrategy:
+ rollingUpdate:
+ partition: 3
+ type: RollingUpdate
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ dataSource: null
+ resources:
+ requests:
+ storage: 10Gi
+ storageClassName: local-storage
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: dm-master
+ name: dm-master
+spec:
+ ports:
+ - name: client
+ port: 8261
+ protocol: TCP
+ targetPort: 8261
+ selector:
+ app.kubernetes.io/component: dm-master
+ sessionAffinity: None
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: dm-master
+ name: dm-master-peer
+spec:
+ clusterIP: None
+ ports:
+ - name: peer
+ port: 8291
+ protocol: TCP
+ targetPort: 8291
+ selector:
+ app.kubernetes.io/component: dm-master
+ publishNotReadyAddresses: true
+ sessionAffinity: None
+ type: ClusterIP
+
diff --git a/manifests/dm/master/kustomization.yaml b/manifests/dm/master/kustomization.yaml
new file mode 100644
index 0000000000..38a802b317
--- /dev/null
+++ b/manifests/dm/master/kustomization.yaml
@@ -0,0 +1,10 @@
+resources:
+- dm-master.yaml
+configMapGenerator:
+- name: dm-master-config
+ files:
+ - config/config.toml
+generatorOptions:
+ labels:
+ app.kubernetes.io/component: dm-master
+
diff --git a/manifests/dm/worker/base/dm-worker.yaml b/manifests/dm/worker/base/dm-worker.yaml
new file mode 100644
index 0000000000..a4b8215b09
--- /dev/null
+++ b/manifests/dm/worker/base/dm-worker.yaml
@@ -0,0 +1,121 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ labels:
+ app.kubernetes.io/component: dm-worker
+ name: dm-worker
+spec:
+ podManagementPolicy: Parallel
+ replicas: 3
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: dm-worker
+ serviceName: dm-worker-peer
+ template:
+ metadata:
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "8262"
+ prometheus.io/scrape: "true"
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/component: dm-worker
+ spec:
+ affinity: {}
+ containers:
+ - command:
+ - /dm-worker
+ - --name=$(MY_POD_NAME)
+ - --worker-addr=:8262
+ - --advertise-addr=$(MY_POD_NAME).$(PEER_SERVICE_NAME).$(NAMESPACE):8262
+ - --join=dm-master.$(NAMESPACE):8261
+ env:
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: PEER_SERVICE_NAME
+ value: dm-worker-peer
+ - name: SERVICE_NAME
+ value: dm-worker
+ - name: TZ
+ value: UTC
+ image: pingcap/dm:ha-alpha
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 8
+ httpGet:
+ path: /status
+ port: 8262
+ scheme: HTTP
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /status
+ port: 8262
+ scheme: HTTP
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ name: worker
+ ports:
+ - containerPort: 8262
+ name: client
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ updateStrategy:
+ rollingUpdate:
+ partition: 3
+ type: RollingUpdate
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: dm-worker
+ name: dm-worker
+spec:
+ ports:
+ - name: client
+ port: 8262
+ protocol: TCP
+ targetPort: 8262
+ selector:
+ app.kubernetes.io/component: dm-worker
+ sessionAffinity: None
+ type: ClusterIP
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/component: dm-worker
+ name: dm-worker-peer
+spec:
+ clusterIP: None
+ ports:
+ - name: peer
+ port: 8262
+ protocol: TCP
+ targetPort: 8262
+ selector:
+ app.kubernetes.io/component: dm-worker
+ publishNotReadyAddresses: true
+ sessionAffinity: None
+ type: ClusterIP
+
diff --git a/manifests/dm/worker/base/kustomization.yaml b/manifests/dm/worker/base/kustomization.yaml
new file mode 100644
index 0000000000..ccfbc34109
--- /dev/null
+++ b/manifests/dm/worker/base/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- dm-worker.yaml
diff --git a/manifests/dm/worker/overlays/full/dm-worker-pvc.yaml b/manifests/dm/worker/overlays/full/dm-worker-pvc.yaml
new file mode 100644
index 0000000000..b84cd43da8
--- /dev/null
+++ b/manifests/dm/worker/overlays/full/dm-worker-pvc.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: dm-worker
+spec:
+ template:
+ spec:
+ containers:
+ - name: worker
+ volumeMounts:
+ - mountPath: /data
+ name: data
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ dataSource: null
+ resources:
+ requests:
+ storage: 10Gi
+ storageClassName: local-storage
diff --git a/manifests/dm/worker/overlays/full/kustomization.yaml b/manifests/dm/worker/overlays/full/kustomization.yaml
new file mode 100644
index 0000000000..473fec2a6a
--- /dev/null
+++ b/manifests/dm/worker/overlays/full/kustomization.yaml
@@ -0,0 +1,4 @@
+bases:
+- ../../base
+patches:
+- dm-worker-pvc.yaml
diff --git a/manifests/gke/local-ssd-provision/local-ssd-provision.yaml b/manifests/gke/local-ssd-provision/local-ssd-provision.yaml
index 18229b223d..0798ba0413 100644
--- a/manifests/gke/local-ssd-provision/local-ssd-provision.yaml
+++ b/manifests/gke/local-ssd-provision/local-ssd-provision.yaml
@@ -12,6 +12,7 @@ metadata:
name: local-provisioner-config
namespace: kube-system
data:
+ setPVOwnerRef: "true"
nodeLabelsForPV: |
- kubernetes.io/hostname
storageClassMap: |
diff --git a/manifests/local-dind/local-volume-provisioner.yaml b/manifests/local-dind/local-volume-provisioner.yaml
index df1e99e2c2..ffc3c28342 100644
--- a/manifests/local-dind/local-volume-provisioner.yaml
+++ b/manifests/local-dind/local-volume-provisioner.yaml
@@ -12,6 +12,7 @@ metadata:
name: local-provisioner-config
namespace: kube-system
data:
+ setPVOwnerRef: "true"
nodeLabelsForPV: |
- kubernetes.io/hostname
storageClassMap: |
@@ -38,7 +39,7 @@ spec:
spec:
serviceAccountName: local-storage-admin
containers:
- - image: "quay.io/external_storage/local-volume-provisioner:v2.3.2"
+ - image: "quay.io/external_storage/local-volume-provisioner:v2.3.4"
name: provisioner
securityContext:
privileged: true
@@ -52,7 +53,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: JOB_CONTAINER_IMAGE
- value: "quay.io/external_storage/local-volume-provisioner:v2.3.2"
+ value: "quay.io/external_storage/local-volume-provisioner:v2.3.4"
resources:
requests:
cpu: 100m
diff --git a/manifests/monitor/tidb-monitor.yaml b/manifests/monitor/tidb-monitor.yaml
new file mode 100644
index 0000000000..06850363a2
--- /dev/null
+++ b/manifests/monitor/tidb-monitor.yaml
@@ -0,0 +1,83 @@
+apiVersion: pingcap.com/v1alpha1
+kind: TidbMonitor
+metadata:
+ name: demo
+spec:
+ clusters:
+ - name: demo
+ prometheus:
+ baseImage: prom/prometheus
+ version: v2.11.1
+ resources: {}
+ # limits:
+ # cpu: 8000m
+ # memory: 8Gi
+ # requests:
+ # cpu: 4000m
+ # memory: 4Gi
+ imagePullPolicy: IfNotPresent
+ logLevel: info
+ reserveDays: 12
+ service:
+ type: NodePort
+ portName: http-prometheus
+ grafana:
+ baseImage: grafana/grafana
+ version: 6.0.1
+ imagePullPolicy: IfNotPresent
+ logLevel: info
+ resources: {}
+ # limits:
+ # cpu: 8000m
+ # memory: 8Gi
+ # requests:
+ # cpu: 4000m
+ # memory: 4Gi
+ username: admin
+ password: admin
+ envs:
+ # Configure Grafana using environment variables except GF_PATHS_DATA, GF_SECURITY_ADMIN_USER and GF_SECURITY_ADMIN_PASSWORD
+ # Ref https://grafana.com/docs/installation/configuration/#using-environment-variables
+ GF_AUTH_ANONYMOUS_ENABLED: "true"
+ GF_AUTH_ANONYMOUS_ORG_NAME: "Main Org."
+ GF_AUTH_ANONYMOUS_ORG_ROLE: "Viewer"
+ # if grafana is running behind a reverse proxy with subpath http://foo.bar/grafana
+ # GF_SERVER_DOMAIN: foo.bar
+ # GF_SERVER_ROOT_URL: "%(protocol)s://%(domain)s/grafana/"
+ service:
+ type: NodePort
+ portName: http-grafana
+ initializer:
+ baseImage: pingcap/tidb-monitor-initializer
+ version: v3.0.9
+ imagePullPolicy: Always
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 50m
+ # memory: 64Mi
+ reloader:
+ baseImage: pingcap/tidb-monitor-reloader
+ version: v1.0.1
+ imagePullPolicy: IfNotPresent
+ service:
+ type: NodePort
+ portName: tcp-reloader
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 50m
+ # memory: 64Mi
+ imagePullPolicy: IfNotPresent
+ persistent: true
+ storageClassName: local-storage
+ storage: 10Gi
+ nodeSelector: {}
+ annotations: {}
+ tolerations: []
+ kubePrometheusURL: http://prometheus-k8s.monitoring.svc:9090
+ alertmanagerURL: ""
diff --git a/misc/images/tidb-control/Dockerfile b/misc/images/tidb-control/Dockerfile
index bb60e584da..92db3cebe6 100644
--- a/misc/images/tidb-control/Dockerfile
+++ b/misc/images/tidb-control/Dockerfile
@@ -1,14 +1,12 @@
FROM bash:4.3.48
RUN wget -q http://download.pingcap.org/tidb-latest-linux-amd64.tar.gz \
&& tar xzf tidb-latest-linux-amd64.tar.gz \
- && mv tidb-latest-linux-amd64/bin/pd-ctl \
- tidb-latest-linux-amd64/bin/tidb-ctl \
+ && mv tidb-*-linux-amd64/bin/pd-ctl \
+ tidb-*-linux-amd64/bin/tidb-ctl \
/usr/local/bin/ \
- && rm -rf tidb-latest-linux-amd64.tar.gz tidb-latest-linux-amd64
+ && rm -rf tidb-latest-linux-amd64.tar.gz tidb-*-linux-amd64
ADD banner /etc/banner
ADD profile /etc/profile
CMD ["/usr/local/bin/bash", "-l"]
-
-
diff --git a/misc/images/tidb-debug/Dockerfile b/misc/images/tidb-debug/Dockerfile
index 78e764047c..cde352becc 100644
--- a/misc/images/tidb-debug/Dockerfile
+++ b/misc/images/tidb-debug/Dockerfile
@@ -27,8 +27,8 @@ RUN yum update -y && yum install -y \
RUN wget -q http://download.pingcap.org/tidb-latest-linux-amd64.tar.gz \
&& tar xzf tidb-latest-linux-amd64.tar.gz \
- && mv tidb-latest-linux-amd64/bin/* /usr/local/bin/ \
- && rm -rf tidb-latest-linux-amd64.tar.gz tidb-latest-linux-amd64
+ && mv tidb-*-linux-amd64/bin/* /usr/local/bin/ \
+ && rm -rf tidb-latest-linux-amd64.tar.gz tidb-*-linux-amd64
RUN wget https://github.com/brendangregg/FlameGraph/archive/master.zip \
&& unzip master.zip \
diff --git a/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go
index 9bd5aa4783..84533f80bc 100644
--- a/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go
+++ b/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster.go
@@ -16,38 +16,114 @@ package defaulting
import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/utils/pointer"
)
const (
- defaultTiDBImage = "pingcap/tidb"
- defaultTiKVImage = "pingcap/tikv"
- defaultPDImage = "pingcap/pd"
- defaultBinlogImage = "pingcap/tidb-binlog"
+ defaultTiDBImage = "pingcap/tidb"
+ defaultTiKVImage = "pingcap/tikv"
+ defaultPDImage = "pingcap/pd"
+ defaultBinlogImage = "pingcap/tidb-binlog"
+ defaultTiFlashImage = "pingcap/tiflash"
+)
+
+var (
+ tidbLogMaxBackups = 3
)
func SetTidbClusterDefault(tc *v1alpha1.TidbCluster) {
- if tc.Spec.TiDB.BaseImage == "" {
- tc.Spec.TiDB.BaseImage = defaultTiDBImage
+ setTidbClusterSpecDefault(tc)
+ setPdSpecDefault(tc)
+ setTikvSpecDefault(tc)
+ setTidbSpecDefault(tc)
+ if tc.Spec.Pump != nil {
+ setPumpSpecDefault(tc)
}
- if tc.Spec.TiKV.BaseImage == "" {
- tc.Spec.TiKV.BaseImage = defaultTiKVImage
+ if tc.Spec.TiFlash != nil {
+ setTiFlashSpecDefault(tc)
}
- if tc.Spec.PD.BaseImage == "" {
- tc.Spec.PD.BaseImage = defaultPDImage
+}
+
+// setTidbClusterSpecDefault only sets defaults for the fields directly under Spec
+func setTidbClusterSpecDefault(tc *v1alpha1.TidbCluster) {
+ if string(tc.Spec.ImagePullPolicy) == "" {
+ tc.Spec.ImagePullPolicy = corev1.PullIfNotPresent
}
- if tc.Spec.Pump != nil && tc.Spec.Pump.BaseImage == "" {
- tc.Spec.Pump.BaseImage = defaultBinlogImage
+ if tc.Spec.TLSCluster == nil {
+ tc.Spec.TLSCluster = &v1alpha1.TLSCluster{Enabled: false}
}
- if tc.Spec.TiDB.Config == nil {
- tc.Spec.TiDB.Config = &v1alpha1.TiDBConfig{}
+ if tc.Spec.EnablePVReclaim == nil {
+ d := false
+ tc.Spec.EnablePVReclaim = &d
}
- if tc.Spec.TiKV.Config == nil {
- tc.Spec.TiKV.Config = &v1alpha1.TiKVConfig{}
+}
+
+func setTidbSpecDefault(tc *v1alpha1.TidbCluster) {
+ if len(tc.Spec.Version) > 0 || tc.Spec.TiDB.Version != nil {
+ if tc.Spec.TiDB.BaseImage == "" {
+ tc.Spec.TiDB.BaseImage = defaultTiDBImage
+ }
}
- if tc.Spec.PD.Config == nil {
- tc.Spec.PD.Config = &v1alpha1.PDConfig{}
+ if tc.Spec.TiDB.MaxFailoverCount == nil {
+ tc.Spec.TiDB.MaxFailoverCount = pointer.Int32Ptr(3)
}
- if string(tc.Spec.ImagePullPolicy) == "" {
- tc.Spec.ImagePullPolicy = corev1.PullIfNotPresent
+
+ // only set the default log configuration here
+ if tc.Spec.TiDB.Config != nil {
+ if tc.Spec.TiDB.Config.Log == nil {
+ tc.Spec.TiDB.Config.Log = &v1alpha1.Log{
+ File: &v1alpha1.FileLogConfig{
+ MaxBackups: &tidbLogMaxBackups,
+ },
+ }
+ } else {
+ if tc.Spec.TiDB.Config.Log.File == nil {
+ tc.Spec.TiDB.Config.Log.File = &v1alpha1.FileLogConfig{
+ MaxBackups: &tidbLogMaxBackups,
+ }
+ } else {
+ if tc.Spec.TiDB.Config.Log.File.MaxBackups == nil {
+ tc.Spec.TiDB.Config.Log.File.MaxBackups = &tidbLogMaxBackups
+ }
+ }
+ }
+ }
+}
+
+func setTikvSpecDefault(tc *v1alpha1.TidbCluster) {
+ if len(tc.Spec.Version) > 0 || tc.Spec.TiKV.Version != nil {
+ if tc.Spec.TiKV.BaseImage == "" {
+ tc.Spec.TiKV.BaseImage = defaultTiKVImage
+ }
+ }
+ if tc.Spec.TiKV.MaxFailoverCount == nil {
+ tc.Spec.TiKV.MaxFailoverCount = pointer.Int32Ptr(3)
+ }
+}
+
+func setPdSpecDefault(tc *v1alpha1.TidbCluster) {
+ if len(tc.Spec.Version) > 0 || tc.Spec.PD.Version != nil {
+ if tc.Spec.PD.BaseImage == "" {
+ tc.Spec.PD.BaseImage = defaultPDImage
+ }
+ }
+ if tc.Spec.PD.MaxFailoverCount == nil {
+ tc.Spec.PD.MaxFailoverCount = pointer.Int32Ptr(3)
+ }
+}
+
+func setPumpSpecDefault(tc *v1alpha1.TidbCluster) {
+ if len(tc.Spec.Version) > 0 || tc.Spec.Pump.Version != nil {
+ if tc.Spec.Pump.BaseImage == "" {
+ tc.Spec.Pump.BaseImage = defaultBinlogImage
+ }
+ }
+}
+
+func setTiFlashSpecDefault(tc *v1alpha1.TidbCluster) {
+ if len(tc.Spec.Version) > 0 || tc.Spec.TiFlash.Version != nil {
+ if tc.Spec.TiFlash.BaseImage == "" {
+ tc.Spec.TiFlash.BaseImage = defaultTiFlashImage
+ }
}
}
diff --git a/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster_test.go b/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster_test.go
new file mode 100644
index 0000000000..fa57445328
--- /dev/null
+++ b/pkg/apis/pingcap/v1alpha1/defaulting/tidbcluster_test.go
@@ -0,0 +1,96 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package defaulting
+
+import (
+ . "github.com/onsi/gomega"
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "testing"
+)
+
+func TestSetTidbSpecDefault(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ tc := newTidbCluster()
+ setTidbSpecDefault(tc)
+ g.Expect(tc.Spec.TiDB.Config).Should(BeNil())
+
+ tc = newTidbCluster()
+ tc.Spec.TiDB.Config = &v1alpha1.TiDBConfig{}
+ setTidbSpecDefault(tc)
+ g.Expect(*tc.Spec.TiDB.Config.Log.File.MaxBackups).Should(Equal(tidbLogMaxBackups))
+
+ tc = newTidbCluster()
+ oomAction := "cancel"
+ tc.Spec.TiDB.Config = &v1alpha1.TiDBConfig{
+ OOMAction: &oomAction,
+ }
+ setTidbSpecDefault(tc)
+ g.Expect(*tc.Spec.TiDB.Config.Log.File.MaxBackups).Should(Equal(tidbLogMaxBackups))
+ g.Expect(*tc.Spec.TiDB.Config.OOMAction).Should(Equal(oomAction))
+
+ tc = newTidbCluster()
+ infoLevel := "info"
+ tc.Spec.TiDB.Config = &v1alpha1.TiDBConfig{
+ OOMAction: &oomAction,
+ Log: &v1alpha1.Log{
+ Level: &infoLevel,
+ },
+ }
+ setTidbSpecDefault(tc)
+ g.Expect(*tc.Spec.TiDB.Config.Log.File.MaxBackups).Should(Equal(tidbLogMaxBackups))
+ g.Expect(*tc.Spec.TiDB.Config.OOMAction).Should(Equal(oomAction))
+ g.Expect(*tc.Spec.TiDB.Config.Log.Level).Should(Equal(infoLevel))
+
+ tc = newTidbCluster()
+ fileName := "slowlog.log"
+ tc.Spec.TiDB.Config = &v1alpha1.TiDBConfig{
+ OOMAction: &oomAction,
+ Log: &v1alpha1.Log{
+ Level: &infoLevel,
+ File: &v1alpha1.FileLogConfig{
+ Filename: &fileName,
+ },
+ },
+ }
+ setTidbSpecDefault(tc)
+ g.Expect(*tc.Spec.TiDB.Config.Log.File.MaxBackups).Should(Equal(tidbLogMaxBackups))
+ g.Expect(*tc.Spec.TiDB.Config.OOMAction).Should(Equal(oomAction))
+ g.Expect(*tc.Spec.TiDB.Config.Log.Level).Should(Equal(infoLevel))
+ g.Expect(*tc.Spec.TiDB.Config.Log.File.Filename).Should(Equal(fileName))
+
+ tc = newTidbCluster()
+ maxSize := 600
+ tc.Spec.TiDB.Config = &v1alpha1.TiDBConfig{
+ OOMAction: &oomAction,
+ Log: &v1alpha1.Log{
+ Level: &infoLevel,
+ File: &v1alpha1.FileLogConfig{
+ Filename: &fileName,
+ MaxSize: &maxSize,
+ },
+ },
+ }
+ setTidbSpecDefault(tc)
+ g.Expect(*tc.Spec.TiDB.Config.Log.File.MaxSize).Should(Equal(maxSize))
+ g.Expect(*tc.Spec.TiDB.Config.Log.File.MaxBackups).Should(Equal(tidbLogMaxBackups))
+ g.Expect(*tc.Spec.TiDB.Config.OOMAction).Should(Equal(oomAction))
+ g.Expect(*tc.Spec.TiDB.Config.Log.Level).Should(Equal(infoLevel))
+ g.Expect(*tc.Spec.TiDB.Config.Log.File.Filename).Should(Equal(fileName))
+
+}
+
+func newTidbCluster() *v1alpha1.TidbCluster {
+ return &v1alpha1.TidbCluster{}
+}
diff --git a/pkg/apis/pingcap/v1alpha1/openapi_generated.go b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
index 1a92415d11..cd16aa5979 100644
--- a/pkg/apis/pingcap/v1alpha1/openapi_generated.go
+++ b/pkg/apis/pingcap/v1alpha1/openapi_generated.go
@@ -35,12 +35,23 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupScheduleSpec": schema_pkg_apis_pingcap_v1alpha1_BackupScheduleSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BackupSpec": schema_pkg_apis_pingcap_v1alpha1_BackupSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BasicAutoScalerSpec": schema_pkg_apis_pingcap_v1alpha1_BasicAutoScalerSpec(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BasicAutoScalerStatus": schema_pkg_apis_pingcap_v1alpha1_BasicAutoScalerStatus(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Binlog": schema_pkg_apis_pingcap_v1alpha1_Binlog(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.CommonConfig": schema_pkg_apis_pingcap_v1alpha1_CommonConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ComponentSpec": schema_pkg_apis_pingcap_v1alpha1_ComponentSpec(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Experimental": schema_pkg_apis_pingcap_v1alpha1_Experimental(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FileLogConfig": schema_pkg_apis_pingcap_v1alpha1_FileLogConfig(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Flash": schema_pkg_apis_pingcap_v1alpha1_Flash(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FlashCluster": schema_pkg_apis_pingcap_v1alpha1_FlashCluster(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FlashLogger": schema_pkg_apis_pingcap_v1alpha1_FlashLogger(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider": schema_pkg_apis_pingcap_v1alpha1_GcsStorageProvider(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec": schema_pkg_apis_pingcap_v1alpha1_HelperSpec(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.IsolationRead": schema_pkg_apis_pingcap_v1alpha1_IsolationRead(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Log": schema_pkg_apis_pingcap_v1alpha1_Log(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.LogTailerSpec": schema_pkg_apis_pingcap_v1alpha1_LogTailerSpec(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MasterKeyFileConfig": schema_pkg_apis_pingcap_v1alpha1_MasterKeyFileConfig(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MasterKeyKMSConfig": schema_pkg_apis_pingcap_v1alpha1_MasterKeyKMSConfig(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MetricsStatus": schema_pkg_apis_pingcap_v1alpha1_MetricsStatus(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MonitorContainer": schema_pkg_apis_pingcap_v1alpha1_MonitorContainer(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracing": schema_pkg_apis_pingcap_v1alpha1_OpenTracing(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracingReporter": schema_pkg_apis_pingcap_v1alpha1_OpenTracingReporter(ref),
@@ -71,12 +82,15 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec": schema_pkg_apis_pingcap_v1alpha1_ServiceSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Status": schema_pkg_apis_pingcap_v1alpha1_Status(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StmtSummary": schema_pkg_apis_pingcap_v1alpha1_StmtSummary(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageClaim": schema_pkg_apis_pingcap_v1alpha1_StorageClaim(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageProvider": schema_pkg_apis_pingcap_v1alpha1_StorageProvider(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig": schema_pkg_apis_pingcap_v1alpha1_TiDBAccessConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBConfig": schema_pkg_apis_pingcap_v1alpha1_TiDBConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBServiceSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBSlowLogTailerSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec": schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiFlashConfig": schema_pkg_apis_pingcap_v1alpha1_TiFlashConfig(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiFlashSpec": schema_pkg_apis_pingcap_v1alpha1_TiFlashSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVBlockCacheConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVBlockCacheConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCfConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVCfConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVClient": schema_pkg_apis_pingcap_v1alpha1_TiKVClient(ref),
@@ -84,8 +98,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVCoprocessorConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorReadPoolConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVCoprocessorReadPoolConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVDbConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVDbConfig(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVEncryptionConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVEncryptionConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVGCConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVGCConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVImportConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVImportConfig(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVMasterKeyConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVMasterKeyConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVPDConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVPDConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftDBConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVRaftDBConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftstoreConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVRaftstoreConfig(ref),
@@ -98,10 +114,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVTitanCfConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVTitanCfConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVTitanDBConfig": schema_pkg_apis_pingcap_v1alpha1_TiKVTitanDBConfig(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerSpec": schema_pkg_apis_pingcap_v1alpha1_TidbAutoScalerSpec(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerStatus": schema_pkg_apis_pingcap_v1alpha1_TidbAutoScalerStatus(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbCluster": schema_pkg_apis_pingcap_v1alpha1_TidbCluster(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScaler": schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScaler(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScalerList": schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScalerList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoScalerSpec": schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScalerSpec(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterAutoSclaerStatus": schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoSclaerStatus(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterList": schema_pkg_apis_pingcap_v1alpha1_TidbClusterList(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef": schema_pkg_apis_pingcap_v1alpha1_TidbClusterRef(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterSpec": schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref),
@@ -111,8 +129,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbInitializerStatus": schema_pkg_apis_pingcap_v1alpha1_TidbInitializerStatus(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitor": schema_pkg_apis_pingcap_v1alpha1_TidbMonitor(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorList": schema_pkg_apis_pingcap_v1alpha1_TidbMonitorList(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorRef": schema_pkg_apis_pingcap_v1alpha1_TidbMonitorRef(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorSpec": schema_pkg_apis_pingcap_v1alpha1_TidbMonitorSpec(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerSpec": schema_pkg_apis_pingcap_v1alpha1_TikvAutoScalerSpec(ref),
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerStatus": schema_pkg_apis_pingcap_v1alpha1_TikvAutoScalerStatus(ref),
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TxnLocalLatches": schema_pkg_apis_pingcap_v1alpha1_TxnLocalLatches(ref),
"k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource": schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref),
"k8s.io/api/core/v1.Affinity": schema_k8sio_api_core_v1_Affinity(ref),
@@ -374,44 +394,30 @@ func schema_pkg_apis_pingcap_v1alpha1_BRConfig(ref common.ReferenceCallback) com
Description: "BRConfig contains config for BR",
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "pd": {
- SchemaProps: spec.SchemaProps{
- Description: "PDAddress is the PD address of the tidb cluster",
- Type: []string{"string"},
- Format: "",
- },
- },
- "db": {
- SchemaProps: spec.SchemaProps{
- Description: "DB is the specific DB which will be backed-up or restored",
- Type: []string{"string"},
- Format: "",
- },
- },
- "table": {
+ "cluster": {
SchemaProps: spec.SchemaProps{
- Description: "Table is the specific table which will be backed-up or restored",
+ Description: "ClusterName of backup/restore cluster",
Type: []string{"string"},
Format: "",
},
},
- "ca": {
+ "clusterNamespace": {
SchemaProps: spec.SchemaProps{
- Description: "CA is the CA certificate path for TLS connection",
+ Description: "Namespace of backup/restore cluster",
Type: []string{"string"},
Format: "",
},
},
- "cert": {
+ "db": {
SchemaProps: spec.SchemaProps{
- Description: "Cert is the certificate path for TLS connection",
+ Description: "DB is the specific DB which will be backed-up or restored",
Type: []string{"string"},
Format: "",
},
},
- "key": {
+ "table": {
SchemaProps: spec.SchemaProps{
- Description: "Key is the private key path for TLS connection",
+ Description: "Table is the specific table which will be backed-up or restored",
Type: []string{"string"},
Format: "",
},
@@ -473,7 +479,7 @@ func schema_pkg_apis_pingcap_v1alpha1_BRConfig(ref common.ReferenceCallback) com
},
},
},
- Required: []string{"pd"},
+ Required: []string{"cluster"},
},
},
}
@@ -722,6 +728,13 @@ func schema_pkg_apis_pingcap_v1alpha1_BackupSpec(ref common.ReferenceCallback) c
Format: "",
},
},
+ "tikvGCLifeTime": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TikvGCLifeTime is to specify the safe gc life time for backup. The time limit during which data is retained for each GC, in the format of Go Duration. When a GC happens, the current time minus this value is the safe point.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"s3": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider"),
@@ -752,11 +765,44 @@ func schema_pkg_apis_pingcap_v1alpha1_BackupSpec(ref common.ReferenceCallback) c
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig"),
},
},
+ "tolerations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Base tolerations of backup Pods, components may add more tolerations upon this respectively",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "affinity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Affinity of backup Pods",
+ Ref: ref("k8s.io/api/core/v1.Affinity"),
+ },
+ },
+ "useKMS": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Use KMS to decrypt the secrets",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "serviceAccount": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specify service account of backup",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"},
}
}
@@ -808,6 +854,27 @@ func schema_pkg_apis_pingcap_v1alpha1_BasicAutoScalerSpec(ref common.ReferenceCa
},
},
},
+ "metricsTimeDuration": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricsTimeDuration describe the Time duration to be queried in the Prometheus",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "scaleOutThreshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ScaleOutThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-out result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 3.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "scaleInThreshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ScaleInThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-in result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 5.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
},
Required: []string{"maxReplicas"},
},
@@ -817,6 +884,55 @@ func schema_pkg_apis_pingcap_v1alpha1_BasicAutoScalerSpec(ref common.ReferenceCa
}
}
+func schema_pkg_apis_pingcap_v1alpha1_BasicAutoScalerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "BasicAutoScalerStatus describe the basic auto-scaling status",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metrics": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricsStatusList describes the metrics status in the last auto-scaling reconciliation",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MetricsStatus"),
+ },
+ },
+ },
+ },
+ },
+ "currentReplicas": {
+ SchemaProps: spec.SchemaProps{
+ Description: "CurrentReplicas describes the current replicas for the component(tidb/tikv)",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "recommendedReplicas": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RecommendedReplicas describes the calculated replicas in the last auto-scaling reconciliation for the component(tidb/tikv)",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "lastAutoScalingTimestamp": {
+ SchemaProps: spec.SchemaProps{
+ Description: "LastAutoScalingTimestamp describes the last auto-scaling timestamp for the component(tidb/tikv)",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ },
+ Required: []string{"currentReplicas"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MetricsStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_Binlog(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -824,6 +940,13 @@ func schema_pkg_apis_pingcap_v1alpha1_Binlog(ref common.ReferenceCallback) commo
Description: "Binlog is the config for binlog.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
+ "enable": {
+ SchemaProps: spec.SchemaProps{
+ Description: "optional",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
"write-timeout": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 15s",
@@ -858,6 +981,52 @@ func schema_pkg_apis_pingcap_v1alpha1_Binlog(ref common.ReferenceCallback) commo
}
}
+func schema_pkg_apis_pingcap_v1alpha1_CommonConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "CommonConfig is the configuration of TiFlash process.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "path_realtime_mode": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to false",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "mark_cache_size": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 5368709120",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "minmax_index_cache_size": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 5368709120",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "flash": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Flash"),
+ },
+ },
+ "loger": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FlashLogger"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Flash", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FlashLogger"},
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_ComponentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -962,11 +1131,44 @@ func schema_pkg_apis_pingcap_v1alpha1_ComponentSpec(ref common.ReferenceCallback
Format: "",
},
},
+ "env": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container, like v1.Container.Env. Note that the following env names cannot be used and may be overridden by tidb-operator built envs. - NAMESPACE - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED - SLOW_LOG_FILE",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
},
},
},
Dependencies: []string{
- "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
+ "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_Experimental(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Experimental controls the features that are still experimental: their semantics, interfaces are subject to change. Using these features in the production environment is not recommended.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "allow-auto-random": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether enable the syntax like `auto_random(3)` on the primary key column. imported from TiDB v3.1.0",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
}
}
@@ -1017,6 +1219,108 @@ func schema_pkg_apis_pingcap_v1alpha1_FileLogConfig(ref common.ReferenceCallback
}
}
+func schema_pkg_apis_pingcap_v1alpha1_Flash(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Flash is the configuration of [flash] section.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "overlap_threshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 0.6",
+ Type: []string{"number"},
+ Format: "double",
+ },
+ },
+ "compact_log_min_period": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 200",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "flash_cluster": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FlashCluster"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FlashCluster"},
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_FlashCluster(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "FlashCluster is the configuration of [flash.flash_cluster] section.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "refresh_interval": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 20",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "update_rule_interval": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 10",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "master_ttl": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 60",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_FlashLogger(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "FlashLogger is the configuration of [logger] section.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "size": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 100M",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "level": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to information",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "count": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 10",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_GcsStorageProvider(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -1114,6 +1418,33 @@ func schema_pkg_apis_pingcap_v1alpha1_HelperSpec(ref common.ReferenceCallback) c
}
}
+func schema_pkg_apis_pingcap_v1alpha1_IsolationRead(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "IsolationRead is the config for isolation read.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "engines": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Engines filters tidb-server access paths by engine type. imported from v3.1.0",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_Log(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -1142,12 +1473,38 @@ func schema_pkg_apis_pingcap_v1alpha1_Log(ref common.ReferenceCallback) common.O
Format: "",
},
},
+ "enable-timestamp": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableTimestamp enables automatic timestamps in log output.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "enable-error-stack": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableErrorStack enables annotating logs with the full stack error message.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
"file": {
SchemaProps: spec.SchemaProps{
Description: "File log config.",
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.FileLogConfig"),
},
},
+ "enable-slow-log": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "slow-query-file": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"slow-threshold": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 300",
@@ -1184,6 +1541,159 @@ func schema_pkg_apis_pingcap_v1alpha1_Log(ref common.ReferenceCallback) common.O
}
}
+func schema_pkg_apis_pingcap_v1alpha1_LogTailerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "LogTailerSpec represents an optional log tailer sidecar container",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "limits": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+ },
+ },
+ },
+ },
+ },
+ "requests": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/api/resource.Quantity"},
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_MasterKeyFileConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "method": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Encryption method, use master key encryption data key Possible values: plaintext, aes128-ctr, aes192-ctr, aes256-ctr Optional: Default to plaintext optional",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "path": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Text file containing the key in hex form, end with '\n'",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"path"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_MasterKeyKMSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "key-id": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AWS CMK key-id it can be found in AWS Console or use aws cli This field is required",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "access-key": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AccessKey of AWS user, leave empty if using other authorization method optional",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "secret-access-key": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SecretKey of AWS user, leave empty if using other authorization method optional",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "region": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Region of this KMS key Optional: Default to us-east-1 optional",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "endpoint": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Used for KMS compatible KMS, such as Ceph, minio, If use AWS, leave empty optional",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"key-id"},
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_MetricsStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricsStatus describe the basic metrics status in the last auto-scaling reconciliation",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name indicates the metrics name",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "currentValue": {
+ SchemaProps: spec.SchemaProps{
+ Description: "CurrentValue indicates the value calculated in the last auto-scaling reconciliation",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "thresholdValue": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TargetValue indicates the threshold value for this metrics in auto-scaling",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name", "currentValue", "thresholdValue"},
+ },
+ },
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_MonitorContainer(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -1531,11 +2041,16 @@ func schema_pkg_apis_pingcap_v1alpha1_PDConfig(ref common.ReferenceCallback) com
Format: "",
},
},
+ "dashboard": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.DashboardConfig"),
+ },
+ },
},
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDLogConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDMetricConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDNamespaceConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDReplicationConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDScheduleConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSecurityConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDServerConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDStoreLabel"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.DashboardConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDLogConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDMetricConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDNamespaceConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDReplicationConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDScheduleConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSecurityConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDServerConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDStoreLabel"},
}
}
@@ -1710,7 +2225,14 @@ func schema_pkg_apis_pingcap_v1alpha1_PDReplicationConfig(ref common.ReferenceCa
},
"strictly-match-label": {
SchemaProps: spec.SchemaProps{
- Description: "StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocaltionLabels. Immutable, change should be made through pd-ctl after cluster creation",
+ Description: "StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocationLabels. Immutable, change should be made through pd-ctl after cluster creation. Imported from v3.1.0",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "enable-placement-rules": {
+ SchemaProps: spec.SchemaProps{
+ Description: "When PlacementRules feature is enabled. MaxReplicas and LocationLabels are not used anymore.",
Type: []string{"string"},
Format: "",
},
@@ -1779,7 +2301,7 @@ func schema_pkg_apis_pingcap_v1alpha1_PDScheduleConfig(ref common.ReferenceCallb
},
"leader-schedule-limit": {
SchemaProps: spec.SchemaProps{
- Description: "LeaderScheduleLimit is the max coexist leader schedules. Immutable, change should be made through pd-ctl after cluster creation Optional: Defaults to 4",
+ Description: "LeaderScheduleLimit is the max coexist leader schedules. Immutable, change should be made through pd-ctl after cluster creation. Optional: Defaults to 4. Imported from v3.1.0",
Type: []string{"integer"},
Format: "int64",
},
@@ -1821,7 +2343,7 @@ func schema_pkg_apis_pingcap_v1alpha1_PDScheduleConfig(ref common.ReferenceCallb
},
"tolerant-size-ratio": {
SchemaProps: spec.SchemaProps{
- Description: "TolerantSizeRatio is the ratio of buffer size for balance scheduler. Immutable, change should be made through pd-ctl after cluster creation",
+ Description: "TolerantSizeRatio is the ratio of buffer size for balance scheduler. Immutable, change should be made through pd-ctl after cluster creation. Imported from v3.1.0",
Type: []string{"number"},
Format: "double",
},
@@ -1902,6 +2424,35 @@ func schema_pkg_apis_pingcap_v1alpha1_PDScheduleConfig(ref common.ReferenceCallb
},
},
},
+ "schedulers-payload": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Only used to display",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "enable-one-way-merge": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableOneWayMerge is the option to enable one way merge. This means a Region can only be merged into the next region of it. Imported from v3.1.0",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "enable-cross-table-merge": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableCrossTableMerge is the option to enable cross table merge. This means two Regions can be merged with different table IDs. This option only works when key type is \"table\". Imported from v3.1.0",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
},
},
@@ -1999,6 +2550,13 @@ func schema_pkg_apis_pingcap_v1alpha1_PDServerConfig(ref common.ReferenceCallbac
Format: "",
},
},
+ "metric-storage": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricStorage is the cluster metric storage. Currently we use prometheus as metric storage, we may use PD/TiKV as metric storage later. Imported from v3.1.0",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
},
},
@@ -2109,6 +2667,19 @@ func schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref common.ReferenceCallback) commo
Format: "",
},
},
+ "env": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container, like v1.Container.Env. Note that the following env names cannot be used and may be overridden by tidb-operator built envs. - NAMESPACE - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED - SLOW_LOG_FILE",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
@@ -2157,6 +2728,13 @@ func schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref common.ReferenceCallback) commo
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec"),
},
},
+ "maxFailoverCount": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MaxFailoverCount limit the max replicas could be added in failover, 0 means no failover. Optional: Defaults to 3",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
"storageClassName": {
SchemaProps: spec.SchemaProps{
Description: "The storageClassName of the persistent volume for PD data storage. Defaults to Kubernetes default storage class.",
@@ -2175,7 +2753,7 @@ func schema_pkg_apis_pingcap_v1alpha1_PDSpec(ref common.ReferenceCallback) commo
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ServiceSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
@@ -2224,20 +2802,6 @@ func schema_pkg_apis_pingcap_v1alpha1_Performance(ref common.ReferenceCallback)
Format: "int64",
},
},
- "tcp-keep-alive": {
- SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to true",
- Type: []string{"boolean"},
- Format: "",
- },
- },
- "cross-join": {
- SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to true",
- Type: []string{"boolean"},
- Format: "",
- },
- },
"stats-lease": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 3s",
@@ -2245,13 +2809,6 @@ func schema_pkg_apis_pingcap_v1alpha1_Performance(ref common.ReferenceCallback)
Format: "",
},
},
- "run-auto-analyze": {
- SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to true",
- Type: []string{"boolean"},
- Format: "",
- },
- },
"stmt-count-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 5000",
@@ -2294,13 +2851,6 @@ func schema_pkg_apis_pingcap_v1alpha1_Performance(ref common.ReferenceCallback)
Format: "",
},
},
- "txn-entry-count-limit": {
- SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to 300000",
- Type: []string{"integer"},
- Format: "int64",
- },
- },
"txn-total-size-limit": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 104857600",
@@ -2308,8 +2858,36 @@ func schema_pkg_apis_pingcap_v1alpha1_Performance(ref common.ReferenceCallback)
Format: "int64",
},
},
- },
- },
+ "tcp-keep-alive": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to true",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "cross-join": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to true",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "run-auto-analyze": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to true",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "txn-entry-count-limit": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 300000",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ },
+ },
},
}
}
@@ -2562,6 +3140,19 @@ func schema_pkg_apis_pingcap_v1alpha1_PumpSpec(ref common.ReferenceCallback) com
Format: "",
},
},
+ "env": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container, like v1.Container.Env. Note that following env names cannot be used and may be overrided by tidb-operator built envs. - NAMESPACE - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED - SLOW_LOG_FILE",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
@@ -2630,7 +3221,7 @@ func schema_pkg_apis_pingcap_v1alpha1_PumpSpec(ref common.ReferenceCallback) com
},
},
Dependencies: []string{
- "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
+ "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
@@ -2731,6 +3322,13 @@ func schema_pkg_apis_pingcap_v1alpha1_RestoreSpec(ref common.ReferenceCallback)
Format: "",
},
},
+ "tikvGCLifeTime": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TikvGCLifeTime is to specify the safe gc life time for restore. The time limit during which data is retained for each GC, in the format of Go Duration. When a GC happens, the current time minus this value is the safe point.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"s3": {
SchemaProps: spec.SchemaProps{
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider"),
@@ -2761,11 +3359,44 @@ func schema_pkg_apis_pingcap_v1alpha1_RestoreSpec(ref common.ReferenceCallback)
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig"),
},
},
+ "tolerations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Base tolerations of restore Pods, components may add more tolerations upon this respectively",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "affinity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Affinity of restore Pods",
+ Ref: ref("k8s.io/api/core/v1.Affinity"),
+ },
+ },
+ "useKMS": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Use KMS to decrypt the secrets",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "serviceAccount": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specify service account of restore",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.BRConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.GcsStorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.S3StorageProvider", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBAccessConfig", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"},
}
}
@@ -2847,7 +3478,7 @@ func schema_pkg_apis_pingcap_v1alpha1_S3StorageProvider(ref common.ReferenceCall
},
},
},
- Required: []string{"provider", "secretName"},
+ Required: []string{"provider"},
},
},
}
@@ -2970,13 +3601,6 @@ func schema_pkg_apis_pingcap_v1alpha1_Status(ref common.ReferenceCallback) commo
Description: "Status is the status section of the config.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "report-status": {
- SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to true",
- Type: []string{"boolean"},
- Format: "",
- },
- },
"metrics-addr": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
@@ -2990,6 +3614,13 @@ func schema_pkg_apis_pingcap_v1alpha1_Status(ref common.ReferenceCallback) commo
Format: "int32",
},
},
+ "report-status": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to true",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
"record-db-qps": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to false",
@@ -3010,6 +3641,13 @@ func schema_pkg_apis_pingcap_v1alpha1_StmtSummary(ref common.ReferenceCallback)
Description: "StmtSummary is the config for statement summary.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
+ "enable": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Enable statement summary or not.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
"max-stmt-count": {
SchemaProps: spec.SchemaProps{
Description: "The maximum number of statements kept in memory. Optional: Defaults to 100",
@@ -3024,9 +3662,51 @@ func schema_pkg_apis_pingcap_v1alpha1_StmtSummary(ref common.ReferenceCallback)
Format: "int32",
},
},
+ "refresh-interval": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The refresh interval of statement summary.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "history-size": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The maximum history size of statement summary.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_StorageClaim(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "StorageClaim contains details of TiFlash storages",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
+ Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
+ },
+ },
+ "storageClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
},
},
+ Dependencies: []string{
+ "k8s.io/api/core/v1.ResourceRequirements"},
}
}
@@ -3090,10 +3770,18 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBAccessConfig(ref common.ReferenceCallb
Format: "",
},
},
+ "tlsClient": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether enable the TLS connection between the SQL client and TiDB server Optional: Defaults to nil",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBTLSClient"),
+ },
+ },
},
Required: []string{"host", "secretName"},
},
},
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBTLSClient"},
}
}
@@ -3101,7 +3789,7 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBConfig(ref common.ReferenceCallback) c
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "TiDBConfig is the configuration of tidb-server",
+ Description: "TiDBConfig is the configuration of tidb-server For more detail, refer to https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration/",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"cors": {
@@ -3228,137 +3916,452 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBConfig(ref common.ReferenceCallback) c
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Binlog"),
},
},
- "compatible-kill-query": {
+ "compatible-kill-query": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "plugin": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Plugin"),
+ },
+ },
+ "pessimistic-txn": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PessimisticTxn"),
+ },
+ },
+ "check-mb4-value-in-utf8": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to true",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "alter-primary-key": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to false",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "treat-old-version-utf8-as-utf8mb4": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to true",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "split-region-max-num": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 1000",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "stmt-summary": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StmtSummary"),
+ },
+ },
+ "repair-mode": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RepairMode indicates that the TiDB is in the repair mode for table meta.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "repair-table-list": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "isolation-read": {
+ SchemaProps: spec.SchemaProps{
+ Description: "IsolationRead indicates that the TiDB reads data from which isolation level(engine and label).",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.IsolationRead"),
+ },
+ },
+ "max-server-connections": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MaxServerConnections is the maximum permitted number of simultaneous client connections.",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "new_collations_enabled_on_first_bootstrap": {
+ SchemaProps: spec.SchemaProps{
+ Description: "NewCollationsEnabledOnFirstBootstrap indicates if the new collations are enabled, it effects only when a TiDB cluster bootstrapped on the first time.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "experimental": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Experimental contains parameters for experimental features.",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Experimental"),
+ },
+ },
+ "enable-dynamic-config": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableDynamicConfig enables the TiDB to fetch configs from PD and update itself during runtime. see https://github.com/pingcap/tidb/pull/13660 for more details.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "enable-table-lock": {
+ SchemaProps: spec.SchemaProps{
+ Description: "imported from v3.1.0 optional",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "delay-clean-table-lock": {
+ SchemaProps: spec.SchemaProps{
+ Description: "imported from v3.1.0 optional",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Binlog", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Experimental", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.IsolationRead", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Log", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracing", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Performance", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PessimisticTxn", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Plugin", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PreparedPlanCache", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ProxyProtocol", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Security", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Status", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StmtSummary", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVClient", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TxnLocalLatches"},
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_TiDBServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "externalTrafficPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ExternalTrafficPolicy of the service Optional: Defaults to omitted",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "exposeStatus": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether expose the status port Optional: Defaults to true",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_TiDBSlowLogTailerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TiDBSlowLogTailerSpec represents an optional log tailer sidecar with TiDB",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "limits": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+ },
+ },
+ },
+ },
+ },
+ "requests": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/api/resource.Quantity"},
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TiDBSpec contains details of TiDB members",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "version": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Version of the component. Override the cluster-level version if non-empty Optional: Defaults to cluster-level setting",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "imagePullPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ImagePullPolicy of the component. Override the cluster-level imagePullPolicy if present Optional: Defaults to cluster-level setting",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hostNetwork": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Whether Hostnetwork of the component is enabled. Override the cluster-level setting if present Optional: Defaults to cluster-level setting",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "affinity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Affinity of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
+ Ref: ref("k8s.io/api/core/v1.Affinity"),
+ },
+ },
+ "priorityClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PriorityClassName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "schedulerName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SchedulerName of the component. Override the cluster-level one if present Optional: Defaults to cluster-level setting",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "nodeSelector": {
+ SchemaProps: spec.SchemaProps{
+ Description: "NodeSelector of the component. Merged into the cluster-level nodeSelector if non-empty Optional: Defaults to cluster-level setting",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "annotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Annotations of the component. Merged into the cluster-level annotations if non-empty Optional: Defaults to cluster-level setting",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "tolerations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Tolerations of the component. Override the cluster-level tolerations if non-empty Optional: Defaults to cluster-level setting",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "podSecurityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodSecurityContext of the component",
+ Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
+ },
+ },
+ "configUpdateStrategy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ConfigUpdateStrategy of the component. Override the cluster-level updateStrategy if present Optional: Defaults to cluster-level setting",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "env": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container, like v1.Container.Env. Note that following env names cannot be used and may be overrided by tidb-operator built envs. - NAMESPACE - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED - SLOW_LOG_FILE",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
+ "limits": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+ },
+ },
+ },
+ },
+ },
+ "requests": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+ },
+ },
+ },
+ },
+ },
+ "replicas": {
SchemaProps: spec.SchemaProps{
- Type: []string{"boolean"},
- Format: "",
+ Description: "The desired ready replicas",
+ Type: []string{"integer"},
+ Format: "int32",
},
},
- "plugin": {
+ "baseImage": {
SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Plugin"),
+ Description: "Base image of the component, image tag is now allowed during validation",
+ Type: []string{"string"},
+ Format: "",
},
},
- "pessimistic-txn": {
+ "service": {
SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PessimisticTxn"),
+ Description: "Service defines a Kubernetes service of TiDB cluster. Optional: No kubernetes service will be created by default.",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec"),
},
},
- "check-mb4-value-in-utf8": {
+ "binlogEnabled": {
SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to true",
+ Description: "Whether enable TiDB Binlog, it is encouraged to not set this field and rely on the default behavior Optional: Defaults to true if PumpSpec is non-nil, otherwise false",
Type: []string{"boolean"},
Format: "",
},
},
- "alter-primary-key": {
+ "maxFailoverCount": {
SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to false",
- Type: []string{"boolean"},
- Format: "",
+ Description: "MaxFailoverCount limit the max replicas could be added in failover, 0 means no failover Optional: Defaults to 3",
+ Type: []string{"integer"},
+ Format: "int32",
},
},
- "treat-old-version-utf8-as-utf8mb4": {
+ "separateSlowLog": {
SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to true",
+ Description: "Whether output the slow log in an separate sidecar container Optional: Defaults to true",
Type: []string{"boolean"},
Format: "",
},
},
- "split-region-max-num": {
+ "tlsClient": {
SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to 1000",
- Type: []string{"integer"},
- Format: "int64",
+ Description: "Whether enable the TLS connection between the SQL client and TiDB server Optional: Defaults to nil",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBTLSClient"),
},
},
- "stmt-summary": {
+ "slowLogTailer": {
SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StmtSummary"),
+ Description: "The spec of the slow log tailer sidecar",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec"),
},
},
- },
- },
- },
- Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Binlog", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Log", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.OpenTracing", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Performance", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PessimisticTxn", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Plugin", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PreparedPlanCache", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.ProxyProtocol", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Security", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.Status", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StmtSummary", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVClient", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TxnLocalLatches"},
- }
-}
-
-func schema_pkg_apis_pingcap_v1alpha1_TiDBServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "externalTrafficPolicy": {
+ "plugins": {
SchemaProps: spec.SchemaProps{
- Description: "ExternalTrafficPolicy of the service Optional: Defaults to omitted",
- Type: []string{"string"},
- Format: "",
+ Description: "Plugins is a list of plugins that are loaded by TiDB server, empty means plugin disabled",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
},
},
- "exposeStatus": {
+ "config": {
SchemaProps: spec.SchemaProps{
- Description: "Whether expose the status port Optional: Defaults to true",
- Type: []string{"boolean"},
- Format: "",
+ Description: "Config is the Configuration of tidb-servers",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBConfig"),
},
},
},
+ Required: []string{"replicas"},
},
},
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBTLSClient", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
-func schema_pkg_apis_pingcap_v1alpha1_TiDBSlowLogTailerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+func schema_pkg_apis_pingcap_v1alpha1_TiFlashConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "TiDBSlowLogTailerSpec represents an optional log tailer sidecar with TiDB",
+ Description: "TiFlashConfig is the configuration of TiFlash.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "limits": {
- SchemaProps: spec.SchemaProps{
- Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
- },
- },
- },
- },
- },
- "requests": {
+ "config": {
SchemaProps: spec.SchemaProps{
- Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
- Type: []string{"object"},
- AdditionalProperties: &spec.SchemaOrBool{
- Allows: true,
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
- },
- },
- },
+ Description: "commonConfig is the Configuration of TiFlash process",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.CommonConfig"),
},
},
},
},
},
Dependencies: []string{
- "k8s.io/apimachinery/pkg/api/resource.Quantity"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.CommonConfig"},
}
}
-func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+func schema_pkg_apis_pingcap_v1alpha1_TiFlashSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "TiDBSpec contains details of TiDB members",
+ Description: "TiFlashSpec contains details of TiFlash members",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"version": {
@@ -3458,6 +4461,19 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) com
Format: "",
},
},
+ "env": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container, like v1.Container.Env. Note that following env names cannot be used and may be overrided by tidb-operator built envs. - NAMESPACE - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED - SLOW_LOG_FILE",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
@@ -3486,6 +4502,13 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) com
},
},
},
+ "serviceAccount": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specify a Service Account for TiFlash",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "The desired ready replicas",
@@ -3500,55 +4523,28 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) com
Format: "",
},
},
- "service": {
- SchemaProps: spec.SchemaProps{
- Description: "Service defines a Kubernetes service of TiDB cluster. Optional: No kubernetes service will be created by default.",
- Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec"),
- },
- },
- "binlogEnabled": {
+ "privileged": {
SchemaProps: spec.SchemaProps{
- Description: "Whether enable TiDB Binlog, it is encouraged to not set this field and rely on the default behavior Optional: Defaults to true if PumpSpec is non-nil, otherwise false",
+ Description: "Whether create the TiFlash container in privileged mode, it is highly discouraged to enable this in critical environment. Optional: defaults to false",
Type: []string{"boolean"},
Format: "",
},
},
"maxFailoverCount": {
SchemaProps: spec.SchemaProps{
- Description: "MaxFailoverCount limit the max replicas could be added in failover, 0 means unlimited Optional: Defaults to 0",
+ Description: "MaxFailoverCount limit the max replicas could be added in failover, 0 means no failover Optional: Defaults to 3",
Type: []string{"integer"},
Format: "int32",
},
},
- "separateSlowLog": {
- SchemaProps: spec.SchemaProps{
- Description: "Whether output the slow log in an separate sidecar container Optional: Defaults to true",
- Type: []string{"boolean"},
- Format: "",
- },
- },
- "enableTLSClient": {
- SchemaProps: spec.SchemaProps{
- Description: "Whether enable the TLS connection between the SQL client and TiDB server Optional: Defaults to false",
- Type: []string{"boolean"},
- Format: "",
- },
- },
- "slowLogTailer": {
- SchemaProps: spec.SchemaProps{
- Description: "The spec of the slow log tailer sidecar",
- Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec"),
- },
- },
- "plugins": {
+ "storageClaims": {
SchemaProps: spec.SchemaProps{
- Description: "Plugins is a list of plugins that are loaded by TiDB server, empty means plugin disabled",
+ Description: "The persistent volume claims of the TiFlash data storages. TiFlash supports multiple disks.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageClaim"),
},
},
},
@@ -3556,16 +4552,22 @@ func schema_pkg_apis_pingcap_v1alpha1_TiDBSpec(ref common.ReferenceCallback) com
},
"config": {
SchemaProps: spec.SchemaProps{
- Description: "Config is the Configuration of tidb-servers",
- Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBConfig"),
+ Description: "Config is the Configuration of TiFlash",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiFlashConfig"),
+ },
+ },
+ "logTailer": {
+ SchemaProps: spec.SchemaProps{
+ Description: "LogTailer is the configurations of the log tailers for TiFlash",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.LogTailerSpec"),
},
},
},
- Required: []string{"replicas"},
+ Required: []string{"replicas", "storageClaims"},
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBServiceSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSlowLogTailerSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.LogTailerSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.StorageClaim", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiFlashConfig", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
@@ -3928,9 +4930,16 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVClient(ref common.ReferenceCallback) c
Format: "int64",
},
},
+ "copr-cache": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.CoprocessorCache"),
+ },
+ },
},
},
},
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.CoprocessorCache"},
}
}
@@ -4022,11 +5031,16 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVConfig(ref common.ReferenceCallback) c
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSecurityConfig"),
},
},
+ "encryption": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVEncryptionConfig"),
+ },
+ },
},
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVDbConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVGCConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVImportConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVPDConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftDBConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftstoreConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVReadPoolConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSecurityConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVServerConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVStorageConfig"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVCoprocessorConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVDbConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVEncryptionConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVGCConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVImportConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVPDConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftDBConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVRaftstoreConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVReadPoolConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSecurityConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVServerConfig", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVStorageConfig"},
}
}
@@ -4053,28 +5067,28 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVCoprocessorConfig(ref common.Reference
},
"region-max-size": {
SchemaProps: spec.SchemaProps{
- Description: "When Region [a,e) size exceeds `region_max_size`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size` (or a little larger). See also: region-split-size Optional: Defaults to 144MB optional",
+ Description: "When Region [a,e) size exceeds `region-max-size`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region-split-size` (or a little larger). See also: region-split-size Optional: Defaults to 144MB optional",
Type: []string{"string"},
Format: "",
},
},
"region-split-size": {
SchemaProps: spec.SchemaProps{
- Description: "When Region [a,e) size exceeds `region_max_size`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size` (or a little larger). See also: region-max-size Optional: Defaults to 96MB optional",
+ Description: "When Region [a,e) size exceeds `region-max-size`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region-split-size` (or a little larger). See also: region-max-size Optional: Defaults to 96MB optional",
Type: []string{"string"},
Format: "",
},
},
"region-max-keys": {
SchemaProps: spec.SchemaProps{
- Description: "When the number of keys in Region [a,e) exceeds the `region_max_keys`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be `region_split_keys`. See also: region-split-keys Optional: Defaults to 1440000 optional",
+ Description: "When the number of keys in Region [a,e) exceeds the `region-max-keys`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be `region-split-keys`. See also: region-split-keys Optional: Defaults to 1440000 optional",
Type: []string{"integer"},
Format: "int64",
},
},
"region-split-keys": {
SchemaProps: spec.SchemaProps{
- Description: "When the number of keys in Region [a,e) exceeds the `region_max_keys`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be `region_split_keys`. See also: region-max-keys Optional: Defaults to 960000 optional",
+ Description: "When the number of keys in Region [a,e) exceeds the `region-max-keys`, it will be split into several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be `region-split-keys`. See also: region-max-keys Optional: Defaults to 960000 optional",
Type: []string{"integer"},
Format: "int64",
},
@@ -4091,49 +5105,49 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVCoprocessorReadPoolConfig(ref common.R
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "high_concurrency": {
+ "high-concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int64",
},
},
- "normal_concurrency": {
+ "normal-concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int64",
},
},
- "low_concurrency": {
+ "low-concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 8",
Type: []string{"integer"},
Format: "int64",
},
},
- "max_tasks_per_worker_high": {
+ "max-tasks-per-worker-high": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
- "max_tasks_per_worker_normal": {
+ "max-tasks-per-worker-normal": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
- "max_tasks_per_worker_low": {
+ "max-tasks-per-worker-low": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
- "stack_size": {
+ "stack-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10MB",
Type: []string{"string"},
@@ -4340,20 +5354,126 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVDbConfig(ref common.ReferenceCallback)
}
}
+func schema_pkg_apis_pingcap_v1alpha1_TiKVEncryptionConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "method": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Encrypyion method, use data key encryption raw rocksdb data Possible values: plaintext, aes128-ctr, aes192-ctr, aes256-ctr Optional: Default to plaintext optional",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "data-key-rotation-period": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The frequency of datakey rotation, It managered by tikv Optional: default to 7d optional",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "master-key": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Master key config",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVMasterKeyConfig"),
+ },
+ },
+ "previous-master-key": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Previous master key config It used in master key rotation, the data key should decryption by previous master key and then encrypytion by new master key",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVMasterKeyConfig"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVMasterKeyConfig"},
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_TiKVGCConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
- " batch_keys": {
+ " batch-keys": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional: Defaults to 512",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ " max-write-bytes-per-sec": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_TiKVImportConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "import-dir": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "num-threads": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "num-import-jobs": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "num-import-sst-jobs": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "max-prepare-duration": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "region-split-size": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "stream-channel-window": {
SchemaProps: spec.SchemaProps{
- Description: "Optional: Defaults to 512",
- Type: []string{"integer"},
- Format: "int64",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "max-open-engines": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int64",
},
},
- " max_write_bytes_per_sec": {
+ "upload-speed-limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
@@ -4365,67 +5485,70 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVGCConfig(ref common.ReferenceCallback)
}
}
-func schema_pkg_apis_pingcap_v1alpha1_TiKVImportConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
+func schema_pkg_apis_pingcap_v1alpha1_TiKVMasterKeyConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "import_dir": {
- SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
- },
- },
- "num_threads": {
+ "type": {
SchemaProps: spec.SchemaProps{
- Type: []string{"integer"},
- Format: "int64",
+ Description: "Use KMS encryption or use file encryption, possible values: kms, file If set to kms, kms MasterKeyKMSConfig should be filled, if set to file MasterKeyFileConfig should be filled optional",
+ Type: []string{"string"},
+ Format: "",
},
},
- "num_import_jobs": {
+ "method": {
SchemaProps: spec.SchemaProps{
- Type: []string{"integer"},
- Format: "int64",
+ Description: "Encrypyion method, use master key encryption data key Possible values: plaintext, aes128-ctr, aes192-ctr, aes256-ctr Optional: Default to plaintext optional",
+ Type: []string{"string"},
+ Format: "",
},
},
- "num_import_sst_jobs": {
+ "path": {
SchemaProps: spec.SchemaProps{
- Type: []string{"integer"},
- Format: "int64",
+ Description: "Text file containing the key in hex form, end with '\n'",
+ Type: []string{"string"},
+ Format: "",
},
},
- "max_prepare_duration": {
+ "key-id": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Description: "AWS CMK key-id it can be find in AWS Console or use aws cli This field is required",
+ Type: []string{"string"},
+ Format: "",
},
},
- "region_split_size": {
+ "access-key": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Description: "AccessKey of AWS user, leave empty if using other authrization method optional",
+ Type: []string{"string"},
+ Format: "",
},
},
- "stream_channel_window": {
+ "secret-access-key": {
SchemaProps: spec.SchemaProps{
- Type: []string{"integer"},
- Format: "int64",
+ Description: "SecretKey of AWS user, leave empty if using other authrization method optional",
+ Type: []string{"string"},
+ Format: "",
},
},
- "max_open_engines": {
+ "region": {
SchemaProps: spec.SchemaProps{
- Type: []string{"integer"},
- Format: "int64",
+ Description: "Region of this KMS key Optional: Default to us-east-1 optional",
+ Type: []string{"string"},
+ Format: "",
},
},
- "upload_speed_limit": {
+ "endpoint": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Description: "Used for KMS compatible KMS, such as Ceph, minio, If use AWS, leave empty optional",
+ Type: []string{"string"},
+ Format: "",
},
},
},
+ Required: []string{"path", "key-id"},
},
},
}
@@ -4451,21 +5574,21 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVPDConfig(ref common.ReferenceCallback)
},
},
},
- "retry_interval": {
+ "retry-interval": {
SchemaProps: spec.SchemaProps{
Description: "The interval at which to retry a PD connection initialization.\n\nDefault is 300ms. Optional: Defaults to 300ms",
Type: []string{"string"},
Format: "",
},
},
- "retry_max_count": {
+ "retry-max-count": {
SchemaProps: spec.SchemaProps{
Description: "The maximum number of times to retry a PD connection initialization.\n\nDefault is isize::MAX, represented by -1. Optional: Defaults to -1",
Type: []string{"integer"},
Format: "int64",
},
},
- "retry_log_every": {
+ "retry-log-every": {
SchemaProps: spec.SchemaProps{
Description: "If the client observes the same error message on retry, it can repeat the message only every `n` times.\n\nDefault is 10. Set to 1 to disable this feature. Optional: Defaults to 10",
Type: []string{"integer"},
@@ -4484,139 +5607,139 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVRaftDBConfig(ref common.ReferenceCallb
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "wal_recovery_mode": {
+ "wal-recovery-mode": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "wal_dir": {
+ "wal-dir": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "wal_ttl_seconds": {
+ "wal-ttl-seconds": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
- "wal_size_limit": {
+ "wal-size-limit": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "max_total_wal_size": {
+ "max-total-wal-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "max_background_jobs": {
+ "max-background-jobs": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
- "max_manifest_file_size": {
+ "max-manifest-file-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "create_if_missing": {
+ "create-if-missing": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
- "max_open_files": {
+ "max-open-files": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
- "enable_statistics": {
+ "enable-statistics": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
- "stats_dump_period": {
+ "stats-dump-period": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "compaction_readahead_size": {
+ "compaction-readahead-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "info_log_max_size": {
+ "info-log-max-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "info_log_roll_time": {
+ "info-log-roll-time": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "info_log_keep_log_file_num": {
+ "info-log-keep-log-file-num": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
- "info_log_dir": {
+ "info-log-dir": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "max_sub_compactions": {
+ "max-sub-compactions": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
Format: "int64",
},
},
- "writable_file_max_buffer_size": {
+ "writable-file-max-buffer-size": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "use_direct_io_for_flush_and_compaction": {
+ "use-direct-io-for-flush-and-compaction": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
- "enable_pipelined_write": {
+ "enable-pipelined-write": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
- "allow_concurrent_memtable_write": {
+ "allow-concurrent-memtable-write": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
Format: "",
},
},
- "bytes_per_sync": {
+ "bytes-per-sync": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "wal_bytes_per_sync": {
+ "wal-bytes-per-sync": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
@@ -4992,31 +6115,31 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVSecurityConfig(ref common.ReferenceCal
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "ca_path": {
+ "ca-path": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "cert_path": {
+ "cert-path": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "key_path": {
+ "key-path": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "override_ssl_target": {
+ "override-ssl-target": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
},
},
- "cipher_file": {
+ "cipher-file": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
Format: "",
@@ -5063,7 +6186,7 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVServerConfig(ref common.ReferenceCallb
Format: "int32",
},
},
- "grpc_memory_pool_quota": {
+ "grpc-memory-pool-quota": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 32G",
Type: []string{"string"},
@@ -5305,6 +6428,19 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref common.ReferenceCallback) com
Format: "",
},
},
+ "env": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container, like v1.Container.Env. Note that following env names cannot be used and may be overrided by tidb-operator built envs. - NAMESPACE - TZ - SERVICE_NAME - PEER_SERVICE_NAME - HEADLESS_SERVICE_NAME - SET_NAME - HOSTNAME - CLUSTER_NAME - POD_NAME - BINLOG_ENABLED - SLOW_LOG_FILE",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
"limits": {
SchemaProps: spec.SchemaProps{
Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
@@ -5333,6 +6469,13 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref common.ReferenceCallback) com
},
},
},
+ "serviceAccount": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specify a Service Account for tikv",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "The desired ready replicas",
@@ -5356,7 +6499,7 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref common.ReferenceCallback) com
},
"maxFailoverCount": {
SchemaProps: spec.SchemaProps{
- Description: "MaxFailoverCount limit the max replicas could be added in failover, 0 means unlimited Optional: Defaults to 0",
+ Description: "MaxFailoverCount limit the max replicas could be added in failover, 0 means no failover Optional: Defaults to 3",
Type: []string{"integer"},
Format: "int32",
},
@@ -5379,7 +6522,7 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVSpec(ref common.ReferenceCallback) com
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVConfig", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVConfig", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
@@ -5442,49 +6585,49 @@ func schema_pkg_apis_pingcap_v1alpha1_TiKVStorageReadPoolConfig(ref common.Refer
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "high_concurrency": {
+ "high-concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
- "normal_concurrency": {
+ "normal-concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
- "low_concurrency": {
+ "low-concurrency": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 4",
Type: []string{"integer"},
Format: "int64",
},
},
- "max_tasks_per_worker_high": {
+ "max-tasks-per-worker-high": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
- "max_tasks_per_worker_normal": {
+ "max-tasks-per-worker-normal": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
- "max_tasks_per_worker_low": {
+ "max-tasks-per-worker-low": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 2000",
Type: []string{"integer"},
Format: "int64",
},
},
- "stack_size": {
+ "stack-size": {
SchemaProps: spec.SchemaProps{
Description: "Optional: Defaults to 10MB",
Type: []string{"string"},
@@ -5656,6 +6799,27 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbAutoScalerSpec(ref common.ReferenceCal
},
},
},
+ "metricsTimeDuration": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricsTimeDuration describe the Time duration to be queried in the Prometheus",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "scaleOutThreshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ScaleOutThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-out result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 3.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "scaleInThreshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ScaleInThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-in result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 5.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
},
Required: []string{"maxReplicas"},
},
@@ -5665,6 +6829,55 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbAutoScalerSpec(ref common.ReferenceCal
}
}
+func schema_pkg_apis_pingcap_v1alpha1_TidbAutoScalerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TidbAutoScalerStatus describe the auto-scaling status of tidb",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metrics": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricsStatusList describes the metrics status in the last auto-scaling reconciliation",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MetricsStatus"),
+ },
+ },
+ },
+ },
+ },
+ "currentReplicas": {
+ SchemaProps: spec.SchemaProps{
+ Description: "CurrentReplicas describes the current replicas for the component(tidb/tikv)",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "recommendedReplicas": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RecommendedReplicas describes the calculated replicas in the last auto-scaling reconciliation for the component(tidb/tikv)",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "lastAutoScalingTimestamp": {
+ SchemaProps: spec.SchemaProps{
+ Description: "LastAutoScalingTimestamp describes the last auto-scaling timestamp for the component(tidb/tikv)",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ },
+ Required: []string{"currentReplicas"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MetricsStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_TidbCluster(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -5805,6 +7018,12 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScalerSpec(ref common.Refer
Format: "",
},
},
+ "monitor": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TidbMonitorRef describe the target TidbMonitor, when MetricsUrl and Monitor are both set, Operator will use MetricsUrl",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorRef"),
+ },
+ },
"tikv": {
SchemaProps: spec.SchemaProps{
Description: "TiKV represents the auto-scaling spec for tikv",
@@ -5822,7 +7041,34 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoScalerSpec(ref common.Refer
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerSpec"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbClusterRef", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbMonitorRef", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerSpec"},
+ }
+}
+
+func schema_pkg_apis_pingcap_v1alpha1_TidbClusterAutoSclaerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TidbClusterAutoSclaerStatus describe the whole status",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "tikv": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Tikv describes the status for the tikv in the last auto-scaling reconciliation",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerStatus"),
+ },
+ },
+ "tidb": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Tidb describes the status for the tidb in the last auto-scaling reconciliation",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerStatus"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TidbAutoScalerStatus", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TikvAutoScalerStatus"},
}
}
@@ -5921,6 +7167,12 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec"),
},
},
+ "tiflash": {
+ SchemaProps: spec.SchemaProps{
+ Description: "TiFlash cluster spec",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiFlashSpec"),
+ },
+ },
"pump": {
SchemaProps: spec.SchemaProps{
Description: "Pump cluster spec",
@@ -5933,6 +7185,13 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba
Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec"),
},
},
+ "paused": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Indicates that the tidb cluster is paused and will not be processed by the controller.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
"version": {
SchemaProps: spec.SchemaProps{
Description: "TiDB cluster version",
@@ -5975,11 +7234,10 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba
Format: "",
},
},
- "enableTLSCluster": {
+ "tlsCluster": {
SchemaProps: spec.SchemaProps{
- Description: "Enable TLS connection between TiDB server components Optional: Defaults to false",
- Type: []string{"boolean"},
- Format: "",
+ Description: "Whether enable the TLS connection between TiDB server components Optional: Defaults to nil",
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TLSCluster"),
},
},
"hostNetwork": {
@@ -6034,7 +7292,7 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba
},
"tolerations": {
SchemaProps: spec.SchemaProps{
- Description: "Base tolerations of TiDB cluster Pods, components may add more tolreations upon this respectively",
+ Description: "Base tolerations of TiDB cluster Pods, components may add more tolerations upon this respectively",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
@@ -6057,7 +7315,7 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbClusterSpec(ref common.ReferenceCallba
},
},
Dependencies: []string{
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"},
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.HelperSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PumpSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TLSCluster", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiDBSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiFlashSpec", "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.TiKVSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Toleration"},
}
}
@@ -6361,6 +7619,34 @@ func schema_pkg_apis_pingcap_v1alpha1_TidbMonitorList(ref common.ReferenceCallba
}
}
+func schema_pkg_apis_pingcap_v1alpha1_TidbMonitorRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TidbMonitorRef reference to a TidbMonitor",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace is the namespace that TidbMonitor object locates, default to the same namespace with TidbClusterAutoScaler",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name is the name of TidbMonitor object",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_TidbMonitorSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -6535,6 +7821,27 @@ func schema_pkg_apis_pingcap_v1alpha1_TikvAutoScalerSpec(ref common.ReferenceCal
},
},
},
+ "metricsTimeDuration": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricsTimeDuration describe the Time duration to be queried in the Prometheus",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "scaleOutThreshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ScaleOutThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-out result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 3.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "scaleInThreshold": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ScaleInThreshold describe the consecutive threshold for the auto-scaling, if the consecutive counts of the scale-in result in auto-scaling reach this number, the auto-scaling would be performed. If not set, the default value is 5.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
},
Required: []string{"maxReplicas"},
},
@@ -6544,6 +7851,55 @@ func schema_pkg_apis_pingcap_v1alpha1_TikvAutoScalerSpec(ref common.ReferenceCal
}
}
+func schema_pkg_apis_pingcap_v1alpha1_TikvAutoScalerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TikvAutoScalerStatus describe the auto-scaling status of tikv",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metrics": {
+ SchemaProps: spec.SchemaProps{
+ Description: "MetricsStatusList describes the metrics status in the last auto-scaling reconciliation",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MetricsStatus"),
+ },
+ },
+ },
+ },
+ },
+ "currentReplicas": {
+ SchemaProps: spec.SchemaProps{
+ Description: "CurrentReplicas describes the current replicas for the component(tidb/tikv)",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "recommendedReplicas": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RecommendedReplicas describes the calculated replicas in the last auto-scaling reconciliation for the component(tidb/tikv)",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "lastAutoScalingTimestamp": {
+ SchemaProps: spec.SchemaProps{
+ Description: "LastAutoScalingTimestamp describes the last auto-scaling timestamp for the component(tidb/tikv)",
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ },
+ Required: []string{"currentReplicas"},
+ },
+ },
+ Dependencies: []string{
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.MetricsStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ }
+}
+
func schema_pkg_apis_pingcap_v1alpha1_TxnLocalLatches(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
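The generated schema above reflects the rename of the TiKV config keys from snake_case to kebab-case (for example `grpc_memory_pool_quota` becomes `grpc-memory-pool-quota`, `high_concurrency` becomes `high-concurrency`), so that the serialized keys match what TiKV itself expects. A minimal, self-contained sketch of what the new serialization looks like; the local struct below only mirrors two of the renamed tags and is not the operator's own type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// readPool mirrors two of the renamed TiKVStorageReadPoolConfig tags, for illustration only.
type readPool struct {
	HighConcurrency *int64 `json:"high-concurrency,omitempty"`
	StackSize       string `json:"stack-size,omitempty"`
}

func main() {
	c := int64(4)
	b, _ := json.Marshal(readPool{HighConcurrency: &c, StackSize: "10MB"})
	fmt.Println(string(b)) // {"high-concurrency":4,"stack-size":"10MB"}
}
```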
diff --git a/pkg/apis/pingcap/v1alpha1/pd_config.go b/pkg/apis/pingcap/v1alpha1/pd_config.go
index 406fa564e2..f3b2e6612f 100644
--- a/pkg/apis/pingcap/v1alpha1/pd_config.go
+++ b/pkg/apis/pingcap/v1alpha1/pd_config.go
@@ -13,11 +13,6 @@
package v1alpha1
-import (
- "strconv"
- "strings"
-)
-
// Maintain a copy of PDConfig to make it more friendly with the kubernetes API:
//
// - add 'omitempty' json and toml tag to avoid passing the empty value of primitive types to tidb-server, e.g. 0 of int
@@ -33,7 +28,6 @@ import (
// PDConfig is the configuration of pd-server
// +k8s:openapi-gen=true
type PDConfig struct {
-
// +optional
ForceNewCluster *bool `json:"force-new-cluster,omitempty"`
// Optional: Defaults to true
@@ -124,6 +118,16 @@ type PDConfig struct {
// Optional: Defaults to true
// +optional
NamespaceClassifier string `toml:"namespace-classifier,omitempty" json:"namespace-classifier,omitempty"`
+
+ // +optional
+ Dashboard *DashboardConfig `toml:"dashboard,omitempty" json:"dashboard,omitempty"`
+}
+
+// DashboardConfig is the configuration for tidb-dashboard.
+type DashboardConfig struct {
+ TiDBCAPath string `toml:"tidb-cacert-path,omitempty" json:"tidb_cacert_path,omitempty"`
+ TiDBCertPath string `toml:"tidb-cert-path,omitempty" json:"tidb_cert_path,omitempty"`
+ TiDBKeyPath string `toml:"tidb-key-path,omitempty" json:"tidb_key_path,omitempty"`
}
// PDLogConfig serializes log related config in toml/json.
@@ -177,11 +181,16 @@ type PDReplicationConfig struct {
// Immutable, change should be made through pd-ctl after cluster creation
// +k8s:openapi-gen=false
// +optional
- LocationLabels StringSlice `toml:"location-labels,omitempty" json:"location-labels,omitempty"`
+ LocationLabels []string `toml:"location-labels,omitempty" json:"location-labels,omitempty"`
// StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocaltionLabels.
- // Immutable, change should be made through pd-ctl after cluster creation
+ // Immutable, change should be made through pd-ctl after cluster creation.
+ // Imported from v3.1.0
// +optional
StrictlyMatchLabel *bool `toml:"strictly-match-label,omitempty" json:"strictly-match-label,string,omitempty"`
+
+ // When the PlacementRules feature is enabled, MaxReplicas and LocationLabels are no longer used.
+ // +optional
+ EnablePlacementRules *bool `toml:"enable-placement-rules" json:"enable-placement-rules,string,omitempty"`
}
// PDNamespaceConfig is to overwrite the global setting for specific namespace
@@ -247,8 +256,9 @@ type PDScheduleConfig struct {
// +optional
MaxStoreDownTime string `toml:"max-store-down-time,omitempty" json:"max-store-down-time,omitempty"`
// LeaderScheduleLimit is the max coexist leader schedules.
- // Immutable, change should be made through pd-ctl after cluster creation
- // Optional: Defaults to 4
+ // Immutable, change should be made through pd-ctl after cluster creation.
+ // Optional: Defaults to 4.
+ // Imported from v3.1.0
// +optional
LeaderScheduleLimit *uint64 `toml:"leader-schedule-limit,omitempty" json:"leader-schedule-limit,omitempty"`
// RegionScheduleLimit is the max coexist region schedules.
@@ -278,7 +288,8 @@ type PDScheduleConfig struct {
// +optional
HotRegionCacheHitsThreshold *uint64 `toml:"hot-region-cache-hits-threshold,omitempty" json:"hot-region-cache-hits-threshold,omitempty"`
// TolerantSizeRatio is the ratio of buffer size for balance scheduler.
- // Immutable, change should be made through pd-ctl after cluster creation
+ // Immutable, change should be made through pd-ctl after cluster creation.
+ // Imported from v3.1.0
// +optional
TolerantSizeRatio *float64 `toml:"tolerant-size-ratio,omitempty" json:"tolerant-size-ratio,omitempty"`
//
@@ -337,6 +348,20 @@ type PDScheduleConfig struct {
// Immutable, change should be made through pd-ctl after cluster creation
// +optional
Schedulers *PDSchedulerConfigs `toml:"schedulers,omitempty" json:"schedulers-v2,omitempty"` // json v2 is for the sake of compatible upgrade
+
+ // Only used to display
+ // +optional
+ SchedulersPayload map[string]string `toml:"schedulers-payload" json:"schedulers-payload,omitempty"`
+
+ // EnableOneWayMerge is the option to enable one-way merge. This means a Region can only be merged into the Region next to it.
+ // Imported from v3.1.0
+ // +optional
+ EnableOneWayMerge *bool `toml:"enable-one-way-merge" json:"enable-one-way-merge,string,omitempty"`
+ // EnableCrossTableMerge is the option to enable cross table merge. This means two Regions can be merged with different table IDs.
+ // This option only works when key type is "table".
+ // Imported from v3.1.0
+ // +optional
+ EnableCrossTableMerge *bool `toml:"enable-cross-table-merge" json:"enable-cross-table-merge,string,omitempty"`
}
type PDSchedulerConfigs []PDSchedulerConfig
@@ -380,6 +405,10 @@ type PDSecurityConfig struct {
// KeyPath is the path of file that contains X509 key in PEM format.
// +optional
KeyPath string `toml:"key-path,omitempty" json:"key-path,omitempty"`
+ // CertAllowedCN is the Common Name that is allowed
+ // +optional
+ // +k8s:openapi-gen=false
+ CertAllowedCN []string `toml:"cert-allowed-cn,omitempty" json:"cert-allowed-cn,omitempty"`
}
// PDServerConfig is the configuration for pd server.
@@ -388,6 +417,11 @@ type PDServerConfig struct {
// UseRegionStorage enables the independent region storage.
// +optional
UseRegionStorage *bool `toml:"use-region-storage,omitempty" json:"use-region-storage,string,omitempty"`
+ // MetricStorage is the cluster metric storage.
+ // Currently we use Prometheus as the metric storage; we may use PD/TiKV as metric storage later.
+ // Imported from v3.1.0
+ // +optional
+ MetricStorage *string `toml:"metric-storage" json:"metric-storage,omitempty"`
}
// +k8s:openapi-gen=true
@@ -403,35 +437,18 @@ type PDMetricConfig struct {
// +k8s:openapi-gen=true
type FileLogConfig struct {
// Log filename, leave empty to disable file log.
- Filename string `toml:"filename,omitempty" json:"filename,omitempty"`
+ // +optional
+ Filename *string `toml:"filename,omitempty" json:"filename,omitempty"`
// Is log rotate enabled.
- LogRotate bool `toml:"log-rotate,omitempty" json:"log-rotate,omitempty"`
+ // +optional
+ LogRotate *bool `toml:"log-rotate,omitempty" json:"log-rotate,omitempty"`
// Max size for a single file, in MB.
- MaxSize int `toml:"max-size,omitempty" json:"max-size,omitempty"`
+ // +optional
+ MaxSize *int `toml:"max-size,omitempty" json:"max-size,omitempty"`
// Max log keep days, default is never deleting.
- MaxDays int `toml:"max-days,omitempty" json:"max-days,omitempty"`
+ // +optional
+ MaxDays *int `toml:"max-days,omitempty" json:"max-days,omitempty"`
// Maximum number of old log files to retain.
- MaxBackups int `toml:"max-backups,omitempty" json:"max-backups,omitempty"`
-}
-
-//StringSlice is more friendly to json encode/decode
-type StringSlice []string
-
-// MarshalJSON returns the size as a JSON string.
-func (s StringSlice) MarshalJSON() ([]byte, error) {
- return []byte(strconv.Quote(strings.Join(s, ","))), nil
-}
-
-// UnmarshalJSON parses a JSON string into the bytesize.
-func (s *StringSlice) UnmarshalJSON(text []byte) error {
- data, err := strconv.Unquote(string(text))
- if err != nil {
- return err
- }
- if len(data) == 0 {
- *s = nil
- return nil
- }
- *s = strings.Split(data, ",")
- return nil
+ // +optional
+ MaxBackups *int `toml:"max-backups,omitempty" json:"max-backups,omitempty"`
}
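The pd_config.go changes above add a `dashboard` section and several PD options imported from v3.1.0, and turn the FileLogConfig fields into pointers so that unset values are omitted. A hedged sketch of filling the new Dashboard block; the TLS paths are made up for illustration, and only the field names and tags come from the diff:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func main() {
	cfg := v1alpha1.PDConfig{
		Dashboard: &v1alpha1.DashboardConfig{
			// Hypothetical in-pod paths; point these at the TiDB client certificates.
			TiDBCAPath:   "/var/lib/tidb-client-tls/ca.crt",
			TiDBCertPath: "/var/lib/tidb-client-tls/tls.crt",
			TiDBKeyPath:  "/var/lib/tidb-client-tls/tls.key",
		},
	}
	out, _ := json.Marshal(cfg)
	// Most unset fields are dropped by their omitempty tags; the dashboard keys
	// follow the json tags above, e.g. "tidb_cacert_path".
	fmt.Println(string(out))
}
```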
diff --git a/pkg/apis/pingcap/v1alpha1/tidb_config.go b/pkg/apis/pingcap/v1alpha1/tidb_config.go
index a203125a2d..bc8ca9964b 100644
--- a/pkg/apis/pingcap/v1alpha1/tidb_config.go
+++ b/pkg/apis/pingcap/v1alpha1/tidb_config.go
@@ -31,6 +31,7 @@ import (
// initially copied from TiDB v3.0.6
// TiDBConfig is the configuration of tidb-server
+// For more detail, refer to https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration/
// +k8s:openapi-gen=true
type TiDBConfig struct {
// +optional
@@ -103,6 +104,33 @@ type TiDBConfig struct {
SplitRegionMaxNum *uint64 `toml:"split-region-max-num,omitempty" json:"split-region-max-num,omitempty"`
// +optional
StmtSummary *StmtSummary `toml:"stmt-summary,omitempty" json:"stmt-summary,omitempty"`
+ // RepairMode indicates that the TiDB is in the repair mode for table meta.
+ // +optional
+ RepairMode *bool `toml:"repair-mode" json:"repair-mode,omitempty"`
+ // +optional
+ RepairTableList []string `toml:"repair-table-list" json:"repair-table-list,omitempty"`
+ // IsolationRead indicates which isolation level (engine and label) TiDB reads data from.
+ // +optional
+ IsolationRead *IsolationRead `toml:"isolation-read" json:"isolation-read,omitempty"`
+ // MaxServerConnections is the maximum permitted number of simultaneous client connections.
+ // +optional
+ MaxServerConnections *uint32 `toml:"max-server-connections" json:"max-server-connections,omitempty"`
+ // NewCollationsEnabledOnFirstBootstrap indicates whether the new collations are enabled; it takes effect only when a TiDB cluster is bootstrapped for the first time.
+ // +optional
+ NewCollationsEnabledOnFirstBootstrap *bool `toml:"new_collations_enabled_on_first_bootstrap" json:"new_collations_enabled_on_first_bootstrap,omitempty"`
+ // Experimental contains parameters for experimental features.
+ // +optional
+ Experimental *Experimental `toml:"experimental" json:"experimental,omitempty"`
+ // EnableDynamicConfig enables the TiDB to fetch configs from PD and update itself during runtime.
+ // see https://github.com/pingcap/tidb/pull/13660 for more details.
+ // +optional
+ EnableDynamicConfig *bool `toml:"enable-dynamic-config" json:"enable-dynamic-config,omitempty"`
+ // imported from v3.1.0
+ // +optional
+ EnableTableLock *bool `toml:"enable-table-lock" json:"enable-table-lock,omitempty"`
+ // imported from v3.1.0
+ // +optional
+ DelayCleanTableLock *uint64 `toml:"delay-clean-table-lock" json:"delay-clean-table-lock,omitempty"`
}
// Log is the log section of config.
@@ -119,9 +147,20 @@ type Log struct {
// Disable automatic timestamps in output.
// +optional
DisableTimestamp *bool `toml:"disable-timestamp,omitempty" json:"disable-timestamp,omitempty"`
+ // EnableTimestamp enables automatic timestamps in log output.
+ // +optional
+ EnableTimestamp *bool `toml:"enable-timestamp" json:"enable-timestamp,omitempty"`
+ // EnableErrorStack enables annotating logs with the full stack error
+ // message.
+ // +optional
+ EnableErrorStack *bool `toml:"enable-error-stack" json:"enable-error-stack,omitempty"`
// File log config.
// +optional
File *FileLogConfig `toml:"file,omitempty" json:"file,omitempty"`
+ // +optional
+ EnableSlowLog *bool `toml:"enable-slow-log" json:"enable-slow-log,omitempty"`
+ // +optional
+ SlowQueryFile *string `toml:"slow-query-file" json:"slow-query-file,omitempty"`
// Optional: Defaults to 300
// +optional
SlowThreshold *uint64 `toml:"slow-threshold,omitempty" json:"slow-threshold,omitempty"`
@@ -133,7 +172,7 @@ type Log struct {
QueryLogMaxLen *uint64 `toml:"query-log-max-len,omitempty" json:"query-log-max-len,omitempty"`
// Optional: Defaults to 1
// +optional
- RecordPlanInSlowLog uint32 `toml:"record-plan-in-slow-log,omitempty" json:"record-plan-in-slow-log,omitempty"`
+ RecordPlanInSlowLog *uint32 `toml:"record-plan-in-slow-log,omitempty" json:"record-plan-in-slow-log,omitempty"`
}
// Security is the security section of the config.
@@ -153,19 +192,23 @@ type Security struct {
ClusterSSLCert *string `toml:"cluster-ssl-cert,omitempty" json:"cluster-ssl-cert,omitempty"`
// +optional
ClusterSSLKey *string `toml:"cluster-ssl-key,omitempty" json:"cluster-ssl-key,omitempty"`
+ // ClusterVerifyCN is the Common Name that is allowed
+ // +optional
+ // +k8s:openapi-gen=false
+ ClusterVerifyCN []string `toml:"cluster-verify-cn,omitempty" json:"cluster-verify-cn,omitempty"`
}
// Status is the status section of the config.
// +k8s:openapi-gen=true
type Status struct {
- // Optional: Defaults to true
- // +optional
- ReportStatus *bool `toml:"report-status,omitempty" json:"report-status,omitempty"`
// +optional
MetricsAddr *string `toml:"metrics-addr,omitempty" json:"metrics-addr,omitempty"`
// Optional: Defaults to 15
// +optional
MetricsInterval *uint `toml:"metrics-interval,omitempty" json:"metrics-interval,omitempty"`
+ // Optional: Defaults to true
+ // +optional
+ ReportStatus *bool `toml:"report-status,omitempty" json:"report-status,omitempty"`
// Optional: Defaults to false
// +optional
RecordQPSbyDB *bool `toml:"record-db-qps,omitempty" json:"record-db-qps,omitempty"`
@@ -179,18 +222,9 @@ type Performance struct {
// Optional: Defaults to 0
// +optional
MaxMemory *uint64 `toml:"max-memory,omitempty" json:"max-memory,omitempty"`
- // Optional: Defaults to true
- // +optional
- TCPKeepAlive *bool `toml:"tcp-keep-alive,omitempty" json:"tcp-keep-alive,omitempty"`
- // Optional: Defaults to true
- // +optional
- CrossJoin *bool `toml:"cross-join,omitempty" json:"cross-join,omitempty"`
// Optional: Defaults to 3s
// +optional
StatsLease *string `toml:"stats-lease,omitempty" json:"stats-lease,omitempty"`
- // Optional: Defaults to true
- // +optional
- RunAutoAnalyze *bool `toml:"run-auto-analyze,omitempty" json:"run-auto-analyze,omitempty"`
// Optional: Defaults to 5000
// +optional
StmtCountLimit *uint `toml:"stmt-count-limit,omitempty" json:"stmt-count-limit,omitempty"`
@@ -209,12 +243,21 @@ type Performance struct {
// Optional: Defaults to 3s
// +optional
BindInfoLease *string `toml:"bind-info-lease,omitempty" json:"bind-info-lease,omitempty"`
- // Optional: Defaults to 300000
- // +optional
- TxnEntryCountLimit *uint64 `toml:"txn-entry-count-limit,omitempty" json:"txn-entry-count-limit,omitempty"`
// Optional: Defaults to 104857600
// +optional
TxnTotalSizeLimit *uint64 `toml:"txn-total-size-limit,omitempty" json:"txn-total-size-limit,omitempty"`
+ // Optional: Defaults to true
+ // +optional
+ TCPKeepAlive *bool `toml:"tcp-keep-alive,omitempty" json:"tcp-keep-alive,omitempty"`
+ // Optional: Defaults to true
+ // +optional
+ CrossJoin *bool `toml:"cross-join,omitempty" json:"cross-join,omitempty"`
+ // Optional: Defaults to true
+ // +optional
+ RunAutoAnalyze *bool `toml:"run-auto-analyze,omitempty" json:"run-auto-analyze,omitempty"`
+ // Optional: Defaults to 300000
+ // +optional
+ TxnEntryCountLimit *uint64 `toml:"txn-entry-count-limit,omitempty" json:"txn-entry-count-limit,omitempty"`
}
// PlanCache is the PlanCache section of the config.
@@ -360,11 +403,32 @@ type TiKVClient struct {
// Optional: Defaults to 0
// +optional
StoreLimit int64 `toml:"store-limit,omitempty" json:"store-limit,omitempty"`
+ // +optional
+ CoprCache *CoprocessorCache `toml:"copr-cache" json:"copr-cache,omitempty"`
+}
+
+// CoprocessorCache is the config for coprocessor cache.
+type CoprocessorCache struct {
+ // Whether to enable the copr cache. The copr cache saves the result from TiKV Coprocessor in the memory and
+ // reuses the result when corresponding data in TiKV is unchanged, on a region basis.
+ // +optional
+ Enabled *bool `toml:"enabled" json:"enabled,omitempty"`
+ // The capacity in MB of the cache.
+ // +optional
+ CapacityMB *float64 `toml:"capacity-mb" json:"capacity-mb,omitempty"`
+ // Only cache requests whose result set is small.
+ // +optional
+ AdmissionMaxResultMB *float64 `toml:"admission-max-result-mb" json:"admission-max-result-mb,omitempty"`
+ // Only cache requests that take notable time to process.
+ // +optional
+ AdmissionMinProcessMs *uint64 `toml:"admission-min-process-ms" json:"admission-min-process-ms,omitempty"`
}
// Binlog is the config for binlog.
// +k8s:openapi-gen=true
type Binlog struct {
+ // +optional
+ Enable *bool `toml:"enable" json:"enable,omitempty"`
// Optional: Defaults to 15s
// +optional
WriteTimeout *string `toml:"write-timeout,omitempty" json:"write-timeout,omitempty"`
@@ -406,6 +470,9 @@ type PessimisticTxn struct {
// StmtSummary is the config for statement summary.
// +k8s:openapi-gen=true
type StmtSummary struct {
+ // Enable statement summary or not.
+ // +optional
+ Enable *bool `toml:"enable" json:"enable,omitempty"`
// The maximum number of statements kept in memory.
// Optional: Defaults to 100
// +optional
@@ -414,4 +481,29 @@ type StmtSummary struct {
// Optional: Defaults to 4096
// +optional
MaxSQLLength *uint `toml:"max-sql-length,omitempty" json:"max-sql-length,omitempty"`
+ // The refresh interval of statement summary.
+ // +optional
+ RefreshInterval *int `toml:"refresh-interval" json:"refresh-interval,omitempty"`
+ // The maximum history size of statement summary.
+ // +optional
+ HistorySize *int `toml:"history-size" json:"history-size,omitempty"`
+}
+
+// IsolationRead is the config for isolation read.
+// +k8s:openapi-gen=true
+type IsolationRead struct {
+ // Engines filters tidb-server access paths by engine type.
+ // imported from v3.1.0
+ // +optional
+ Engines []string `toml:"engines" json:"engines,omitempty"`
+}
+
+// Experimental controls the features that are still experimental: their semantics and interfaces are subject to change.
+// Using these features in the production environment is not recommended.
+// +k8s:openapi-gen=true
+type Experimental struct {
+ // Whether to enable syntax like `auto_random(3)` on the primary key column.
+ // imported from TiDB v3.1.0
+ // +optional
+ AllowAutoRandom *bool `toml:"allow-auto-random" json:"allow-auto-random,omitempty"`
}
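The tidb_config.go changes above import several fields from TiDB v3.1.0, such as isolation read, the server connection limit, and the experimental switches. A small sketch of setting a few of them; the values are illustrative, not recommendations, and the remaining TiDBConfig fields are assumed to keep their omitempty tags:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func boolPtr(v bool) *bool       { return &v }
func uint32Ptr(v uint32) *uint32 { return &v }

func main() {
	cfg := v1alpha1.TiDBConfig{
		// Only read from the listed engines; the engine names here are illustrative.
		IsolationRead:        &v1alpha1.IsolationRead{Engines: []string{"tikv", "tiflash"}},
		MaxServerConnections: uint32Ptr(1000),
		Experimental:         &v1alpha1.Experimental{AllowAutoRandom: boolPtr(true)},
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	// Keys follow the json tags, e.g. "isolation-read" and "max-server-connections".
	fmt.Println(string(out))
}
```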
diff --git a/pkg/apis/pingcap/v1alpha1/tidb_config_test.go b/pkg/apis/pingcap/v1alpha1/tidb_config_test.go
index 4e13c68393..f4f8120741 100644
--- a/pkg/apis/pingcap/v1alpha1/tidb_config_test.go
+++ b/pkg/apis/pingcap/v1alpha1/tidb_config_test.go
@@ -51,8 +51,8 @@ func TestTiDBConfig(t *testing.T) {
tStr := buff.String()
g.Expect(tStr).To((Equal(`[performance]
max-procs = 8
- cross-join = true
feedback-probability = 0.8
+ cross-join = true
`)))
var tUnmarshaled TiDBConfig
diff --git a/pkg/apis/pingcap/v1alpha1/tidbcluster.go b/pkg/apis/pingcap/v1alpha1/tidbcluster.go
index dc936c025f..828d332481 100644
--- a/pkg/apis/pingcap/v1alpha1/tidbcluster.go
+++ b/pkg/apis/pingcap/v1alpha1/tidbcluster.go
@@ -66,6 +66,36 @@ func (tc *TidbCluster) TiKVImage() string {
return image
}
+func (tc *TidbCluster) TiKVContainerPrivilege() *bool {
+ if tc.Spec.TiKV.Privileged == nil {
+ pri := false
+ return &pri
+ }
+ return tc.Spec.TiKV.Privileged
+}
+
+func (tc *TidbCluster) TiFlashImage() string {
+ image := tc.Spec.TiFlash.Image
+ baseImage := tc.Spec.TiFlash.BaseImage
+ // base image takes higher priority
+ if baseImage != "" {
+ version := tc.Spec.TiFlash.Version
+ if version == nil {
+ version = &tc.Spec.Version
+ }
+ image = fmt.Sprintf("%s:%s", baseImage, *version)
+ }
+ return image
+}
+
+func (tc *TidbCluster) TiFlashContainerPrivilege() *bool {
+ if tc.Spec.TiFlash.Privileged == nil {
+ pri := false
+ return &pri
+ }
+ return tc.Spec.TiFlash.Privileged
+}
+
func (tc *TidbCluster) TiDBImage() string {
image := tc.Spec.TiDB.Image
baseImage := tc.Spec.TiDB.BaseImage
@@ -216,6 +246,36 @@ func (tc *TidbCluster) TiKVStsActualReplicas() int32 {
return stsStatus.Replicas
}
+func (tc *TidbCluster) TiFlashAllPodsStarted() bool {
+ return tc.TiFlashStsDesiredReplicas() == tc.TiFlashStsActualReplicas()
+}
+
+func (tc *TidbCluster) TiFlashAllStoresReady() bool {
+ if int(tc.TiFlashStsDesiredReplicas()) != len(tc.Status.TiFlash.Stores) {
+ return false
+ }
+
+ for _, store := range tc.Status.TiFlash.Stores {
+ if store.State != TiKVStateUp {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (tc *TidbCluster) TiFlashStsDesiredReplicas() int32 {
+ return tc.Spec.TiFlash.Replicas + int32(len(tc.Status.TiFlash.FailureStores))
+}
+
+func (tc *TidbCluster) TiFlashStsActualReplicas() int32 {
+ stsStatus := tc.Status.TiFlash.StatefulSet
+ if stsStatus == nil {
+ return 0
+ }
+ return stsStatus.Replicas
+}
+
func (tc *TidbCluster) TiDBAllPodsStarted() bool {
return tc.TiDBStsDesiredReplicas() == tc.TiDBStsActualReplicas()
}
@@ -299,11 +359,7 @@ func (tc *TidbCluster) GetClusterID() string {
}
func (tc *TidbCluster) IsTLSClusterEnabled() bool {
- enableTLCluster := tc.Spec.EnableTLSCluster
- if enableTLCluster == nil {
- return defaultEnableTLSCluster
- }
- return *enableTLCluster
+ return tc.Spec.TLSCluster != nil && tc.Spec.TLSCluster.Enabled
}
func (tc *TidbCluster) Scheme() string {
@@ -339,17 +395,13 @@ func (tc *TidbCluster) IsTiDBBinlogEnabled() bool {
}
func (tidb *TiDBSpec) IsTLSClientEnabled() bool {
- enableTLSClient := tidb.EnableTLSClient
- if enableTLSClient == nil {
- return defaultEnableTLSClient
- }
- return *enableTLSClient
+ return tidb.TLSClient != nil && tidb.TLSClient.Enabled
}
func (tidb *TiDBSpec) ShouldSeparateSlowLog() bool {
separateSlowLog := tidb.SeparateSlowLog
if separateSlowLog == nil {
- return defaultEnableTLSClient
+ return defaultSeparateSlowLog
}
return *separateSlowLog
}
@@ -379,3 +431,8 @@ func (tc *TidbCluster) GetInstanceName() string {
}
return tc.Name
}
+
+func (tc *TidbCluster) SkipTLSWhenConnectTiDB() bool {
+ _, ok := tc.Annotations[label.AnnSkipTLSWhenConnectTiDB]
+ return ok
+}
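tidbcluster.go replaces the old `enableTLSCluster` boolean with the `tlsCluster` reference and adds the TiFlash helper methods. A minimal sketch of the new TLS accessor, using only the fields that the helper above dereferences:

```go
package main

import (
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func main() {
	tc := &v1alpha1.TidbCluster{}

	// With no TLSCluster block, cluster-internal TLS is treated as disabled.
	fmt.Println(tc.IsTLSClusterEnabled()) // false

	tc.Spec.TLSCluster = &v1alpha1.TLSCluster{Enabled: true}
	fmt.Println(tc.IsTLSClusterEnabled()) // true
}
```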
diff --git a/pkg/apis/pingcap/v1alpha1/tidbcluster_component.go b/pkg/apis/pingcap/v1alpha1/tidbcluster_component.go
index 8bd0c4e37d..840c24e756 100644
--- a/pkg/apis/pingcap/v1alpha1/tidbcluster_component.go
+++ b/pkg/apis/pingcap/v1alpha1/tidbcluster_component.go
@@ -36,6 +36,7 @@ type ComponentAccessor interface {
DnsPolicy() corev1.DNSPolicy
ConfigUpdateStrategy() ConfigUpdateStrategy
BuildPodSpec() corev1.PodSpec
+ Env() []corev1.EnvVar
}
type componentAccessorImpl struct {
@@ -160,6 +161,10 @@ func (a *componentAccessorImpl) BuildPodSpec() corev1.PodSpec {
return spec
}
+func (a *componentAccessorImpl) Env() []corev1.EnvVar {
+ return a.ComponentSpec.Env
+}
+
// BaseTiDBSpec returns the base spec of TiDB servers
func (tc *TidbCluster) BaseTiDBSpec() ComponentAccessor {
return &componentAccessorImpl{&tc.Spec, &tc.Spec.TiDB.ComponentSpec}
@@ -170,6 +175,11 @@ func (tc *TidbCluster) BaseTiKVSpec() ComponentAccessor {
return &componentAccessorImpl{&tc.Spec, &tc.Spec.TiKV.ComponentSpec}
}
+// BaseTiFlashSpec returns the base spec of TiFlash servers
+func (tc *TidbCluster) BaseTiFlashSpec() ComponentAccessor {
+ return &componentAccessorImpl{&tc.Spec, &tc.Spec.TiFlash.ComponentSpec}
+}
+
// BasePDSpec returns the base spec of PD servers
func (tc *TidbCluster) BasePDSpec() ComponentAccessor {
return &componentAccessorImpl{&tc.Spec, &tc.Spec.PD.ComponentSpec}
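tidbcluster_component.go adds `Env()` to the `ComponentAccessor` interface so each component can surface its own environment variables. A simplified, self-contained mirror of that accessor pattern; the types below are illustrative stand-ins, not the operator's ComponentSpec:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// componentSpec stands in for the operator's ComponentSpec in this sketch.
type componentSpec struct {
	Env []corev1.EnvVar
}

// accessor mirrors the ComponentAccessor idea: expose component settings
// through methods instead of reaching into the struct directly.
type accessor struct {
	spec *componentSpec
}

func (a *accessor) Env() []corev1.EnvVar { return a.spec.Env }

func main() {
	tikv := &componentSpec{
		Env: []corev1.EnvVar{{Name: "MY_FLAG", Value: "1"}}, // illustrative variable
	}
	a := &accessor{spec: tikv}
	fmt.Println(a.Env()[0].Name, a.Env()[0].Value)
}
```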
diff --git a/pkg/apis/pingcap/v1alpha1/tidbclusterautoscaler_types.go b/pkg/apis/pingcap/v1alpha1/tidbclusterautoscaler_types.go
index 749c48082e..e38a570fa1 100644
--- a/pkg/apis/pingcap/v1alpha1/tidbclusterautoscaler_types.go
+++ b/pkg/apis/pingcap/v1alpha1/tidbclusterautoscaler_types.go
@@ -58,6 +58,11 @@ type TidbClusterAutoScalerSpec struct {
// +optional
MetricsUrl *string `json:"metricsUrl,omitempty"`
+ // TidbMonitorRef describes the target TidbMonitor; when MetricsUrl and Monitor are both set,
+ // the Operator will use MetricsUrl
+ // +optional
+ Monitor *TidbMonitorRef `json:"monitor,omitempty"`
+
// TiKV represents the auto-scaling spec for tikv
// +optional
TiKV *TikvAutoScalerSpec `json:"tikv,omitempty"`
@@ -112,8 +117,84 @@ type BasicAutoScalerSpec struct {
// If not set, the default metric will be set to 80% average CPU utilization.
// +optional
Metrics []v2beta2.MetricSpec `json:"metrics,omitempty"`
+
+ // MetricsTimeDuration describes the time duration to be queried in Prometheus
+ // +optional
+ MetricsTimeDuration *string `json:"metricsTimeDuration,omitempty"`
+
+ // ScaleOutThreshold describes the consecutive threshold for auto-scaling:
+ // if the number of consecutive scale-out decisions reaches this threshold,
+ // scale-out is performed.
+ // If not set, the default value is 3.
+ // +optional
+ ScaleOutThreshold *int32 `json:"scaleOutThreshold,omitempty"`
+
+ // ScaleInThreshold describes the consecutive threshold for auto-scaling:
+ // if the number of consecutive scale-in decisions reaches this threshold,
+ // scale-in is performed.
+ // If not set, the default value is 5.
+ // +optional
+ ScaleInThreshold *int32 `json:"scaleInThreshold,omitempty"`
}
-// TODO: sync status
+// +k8s:openapi-gen=true
+// TidbMonitorRef is a reference to a TidbMonitor
+type TidbMonitorRef struct {
+ // Namespace is the namespace where the TidbMonitor object is located;
+ // defaults to the same namespace as the TidbClusterAutoScaler
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+
+ // Name is the name of TidbMonitor object
+ Name string `json:"name"`
+}
+
+// +k8s:openapi-gen=true
+// TidbClusterAutoSclaerStatus describes the whole auto-scaling status
type TidbClusterAutoSclaerStatus struct {
+ // Tikv describes the status for the tikv in the last auto-scaling reconciliation
+ // +optional
+ TiKV *TikvAutoScalerStatus `json:"tikv,omitempty"`
+ // Tidb describes the status for the tidb in the last auto-scaling reconciliation
+ // +optional
+ TiDB *TidbAutoScalerStatus `json:"tidb,omitempty"`
+}
+
+// +k8s:openapi-gen=true
+// TidbAutoScalerStatus describes the auto-scaling status of tidb
+type TidbAutoScalerStatus struct {
+ BasicAutoScalerStatus `json:",inline"`
+}
+
+// +k8s:openapi-gen=true
+// TikvAutoScalerStatus describes the auto-scaling status of tikv
+type TikvAutoScalerStatus struct {
+ BasicAutoScalerStatus `json:",inline"`
+}
+
+// +k8s:openapi-gen=true
+// BasicAutoScalerStatus describes the basic auto-scaling status
+type BasicAutoScalerStatus struct {
+ // MetricsStatusList describes the metrics status in the last auto-scaling reconciliation
+ // +optional
+ MetricsStatusList []MetricsStatus `json:"metrics,omitempty"`
+ // CurrentReplicas describes the current replicas for the component(tidb/tikv)
+ CurrentReplicas int32 `json:"currentReplicas"`
+ // RecommendedReplicas describes the calculated replicas in the last auto-scaling reconciliation for the component(tidb/tikv)
+ // +optional
+ RecommendedReplicas *int32 `json:"recommendedReplicas,omitempty"`
+ // LastAutoScalingTimestamp describes the last auto-scaling timestamp for the component(tidb/tikv)
+ // +optional
+ LastAutoScalingTimestamp *metav1.Time `json:"lastAutoScalingTimestamp,omitempty"`
+}
+
+// +k8s:openapi-gen=true
+// MetricsStatus describes the basic metrics status in the last auto-scaling reconciliation
+type MetricsStatus struct {
+ // Name indicates the metrics name
+ Name string `json:"name"`
+ // CurrentValue indicates the value calculated in the last auto-scaling reconciliation
+ CurrentValue string `json:"currentValue"`
+ // ThresholdValue indicates the threshold value for this metric in auto-scaling
+ ThresholdValue string `json:"thresholdValue"`
}
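tidbclusterautoscaler_types.go adds the status types behind the new `status` field of the TidbClusterAutoScaler CR. A sketch of how one reconciliation result could be recorded in these types; the metric values are made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func int32Ptr(v int32) *int32 { return &v }

func main() {
	status := v1alpha1.TikvAutoScalerStatus{
		BasicAutoScalerStatus: v1alpha1.BasicAutoScalerStatus{
			MetricsStatusList: []v1alpha1.MetricsStatus{
				// Sample metric reading; the numbers are invented for this sketch.
				{Name: "cpu", CurrentValue: "1.2", ThresholdValue: "1.0"},
			},
			CurrentReplicas:     3,
			RecommendedReplicas: int32Ptr(4),
		},
	}
	out, _ := json.MarshalIndent(status, "", "  ")
	// BasicAutoScalerStatus is embedded and inlined, so its keys appear at the top level.
	fmt.Println(string(out))
}
```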
diff --git a/pkg/apis/pingcap/v1alpha1/tiflash_config.go b/pkg/apis/pingcap/v1alpha1/tiflash_config.go
new file mode 100644
index 0000000000..be35c1da41
--- /dev/null
+++ b/pkg/apis/pingcap/v1alpha1/tiflash_config.go
@@ -0,0 +1,352 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+// Ported from the TiFlash configuration as of 2020/04/02
+
+// TiFlashConfig is the configuration of TiFlash.
+// +k8s:openapi-gen=true
+type TiFlashConfig struct {
+ // CommonConfig is the configuration of the TiFlash process
+ // +optional
+ CommonConfig *CommonConfig `json:"config,omitempty"`
+
+ // ProxyConfig is the configuration of the proxy process
+ // +optional
+ // +k8s:openapi-gen=false
+ ProxyConfig *ProxyConfig `json:"proxy,omitempty"`
+}
+
+// FlashServerConfig is the configuration of Proxy server.
+// +k8s:openapi-gen=false
+type FlashServerConfig struct {
+ // +optional
+ EngineAddr string `json:"engine-addr,omitempty" toml:"engine-addr,omitempty"`
+ TiKVServerConfig `json:",inline"`
+}
+
+// ProxyConfig is the configuration of TiFlash proxy process.
+// All the configurations are the same as those of TiKV, except that `engine-addr` is added to the TiKVServerConfig
+// +k8s:openapi-gen=false
+type ProxyConfig struct {
+ // Optional: Defaults to info
+ // +optional
+ LogLevel string `json:"log-level,omitempty" toml:"log-level,omitempty"`
+ // +optional
+ LogFile string `json:"log-file,omitempty" toml:"log-file,omitempty"`
+ // Optional: Defaults to 24h
+ // +optional
+ LogRotationTimespan string `json:"log-rotation-timespan,omitempty" toml:"log-rotation-timespan,omitempty"`
+ // +optional
+ PanicWhenUnexpectedKeyOrData *bool `json:"panic-when-unexpected-key-or-data,omitempty" toml:"panic-when-unexpected-key-or-data,omitempty"`
+ // +optional
+ Server *FlashServerConfig `json:"server,omitempty" toml:"server,omitempty"`
+ // +optional
+ Storage *TiKVStorageConfig `json:"storage,omitempty" toml:"storage,omitempty"`
+ // +optional
+ Raftstore *TiKVRaftstoreConfig `json:"raftstore,omitempty" toml:"raftstore,omitempty"`
+ // +optional
+ Rocksdb *TiKVDbConfig `json:"rocksdb,omitempty" toml:"rocksdb,omitempty"`
+ // +optional
+ Coprocessor *TiKVCoprocessorConfig `json:"coprocessor,omitempty" toml:"coprocessor,omitempty"`
+ // +optional
+ ReadPool *TiKVReadPoolConfig `json:"readpool,omitempty" toml:"readpool,omitempty"`
+ // +optional
+ RaftDB *TiKVRaftDBConfig `json:"raftdb,omitempty" toml:"raftdb,omitempty"`
+ // +optional
+ Import *TiKVImportConfig `json:"import,omitempty" toml:"import,omitempty"`
+ // +optional
+ GC *TiKVGCConfig `json:"gc,omitempty" toml:"gc,omitempty"`
+ // +optional
+ PD *TiKVPDConfig `json:"pd,omitempty" toml:"pd,omitempty"`
+ // +optional
+ Security *TiKVSecurityConfig `json:"security,omitempty" toml:"security,omitempty"`
+}
+
+// CommonConfig is the configuration of TiFlash process.
+// +k8s:openapi-gen=true
+type CommonConfig struct {
+ // Optional: Defaults to "/data0/tmp"
+ // +optional
+ // +k8s:openapi-gen=false
+ TmpPath string `json:"tmp_path,omitempty" toml:"tmp_path,omitempty"`
+
+ // Optional: Defaults to "TiFlash"
+ // +optional
+ // +k8s:openapi-gen=false
+ DisplayName string `json:"display_name,omitempty" toml:"display_name,omitempty"`
+
+ // Optional: Defaults to "default"
+ // +optional
+ // +k8s:openapi-gen=false
+ DefaultProfile string `json:"default_profile,omitempty" toml:"default_profile,omitempty"`
+
+ // Optional: Defaults to "/data0/db"
+ // +optional
+ // +k8s:openapi-gen=false
+ Path string `json:"path,omitempty" toml:"path,omitempty"`
+
+ // Optional: Defaults to false
+ // +optional
+ PathRealtimeMode *bool `json:"path_realtime_mode,omitempty" toml:"path_realtime_mode,omitempty"`
+
+ // Optional: Defaults to 5368709120
+ // +optional
+ MarkCacheSize *int64 `json:"mark_cache_size,omitempty" toml:"mark_cache_size,omitempty"`
+
+ // Optional: Defaults to 5368709120
+ // +optional
+ MinmaxIndexCacheSize *int64 `json:"minmax_index_cache_size,omitempty" toml:"minmax_index_cache_size,omitempty"`
+
+ // Optional: Defaults to "0.0.0.0"
+ // +optional
+ // +k8s:openapi-gen=false
+ ListenHost string `json:"listen_host,omitempty" toml:"listen_host,omitempty"`
+
+ // Optional: Defaults to 9000
+ // +optional
+ // +k8s:openapi-gen=false
+ TCPPort *int32 `json:"tcp_port,omitempty" toml:"tcp_port,omitempty"`
+ // Optional: Defaults to 8123
+ // +optional
+ // +k8s:openapi-gen=false
+ HTTPPort *int32 `json:"http_port,omitempty" toml:"http_port,omitempty"`
+ // Optional: Defaults to 9009
+ // +optional
+ // +k8s:openapi-gen=false
+ InternalServerHTTPPort *int32 `json:"interserver_http_port,omitempty" toml:"interserver_http_port,omitempty"`
+ // +optional
+ Flash *Flash `json:"flash,omitempty" toml:"flash,omitempty"`
+ // +optional
+ FlashLogger *FlashLogger `json:"loger,omitempty" toml:"logger,omitempty"`
+ // +optional
+ // +k8s:openapi-gen=false
+ FlashApplication *FlashApplication `json:"application,omitempty" toml:"application,omitempty"`
+ // +optional
+ // +k8s:openapi-gen=false
+ FlashRaft *FlashRaft `json:"raft,omitempty" toml:"raft,omitempty"`
+ // +optional
+ // +k8s:openapi-gen=false
+ FlashStatus *FlashStatus `json:"status,omitempty" toml:"status,omitempty"`
+ // +optional
+ // +k8s:openapi-gen=false
+ FlashQuota *FlashQuota `json:"quotas,omitempty" toml:"quotas,omitempty"`
+ // +optional
+ // +k8s:openapi-gen=false
+ FlashUser *FlashUser `json:"users,omitempty" toml:"users,omitempty"`
+ // +optional
+ // +k8s:openapi-gen=false
+ FlashProfile *FlashProfile `json:"profiles,omitempty" toml:"profiles,omitempty"`
+}
+
+// FlashProfile is the configuration of [profiles] section.
+// +k8s:openapi-gen=false
+type FlashProfile struct {
+ // +optional
+ Readonly *Profile `json:"readonly,omitempty" toml:"readonly,omitempty"`
+ // +optional
+ Default *Profile `json:"default,omitempty" toml:"default,omitempty"`
+}
+
+// Profile is the configuration profiles.
+// +k8s:openapi-gen=false
+type Profile struct {
+ // +optional
+ Readonly *int32 `json:"readonly,omitempty" toml:"readonly,omitempty"`
+ // +optional
+ MaxMemoryUsage *int64 `json:"max_memory_usage,omitempty" toml:"max_memory_usage,omitempty"`
+ // +optional
+ UseUncompressedCache *int32 `json:"use_uncompressed_cache,omitempty" toml:"use_uncompressed_cache,omitempty"`
+ // +optional
+ LoadBalancing *string `json:"load_balancing,omitempty" toml:"load_balancing,omitempty"`
+}
+
+// FlashUser is the configuration of [users] section.
+// +k8s:openapi-gen=false
+type FlashUser struct {
+ // +optional
+ Readonly *User `json:"readonly,omitempty" toml:"readonly,omitempty"`
+ Default *User `json:"default,omitempty" toml:"default,omitempty"`
+}
+
+// User is the configuration of users.
+// +k8s:openapi-gen=false
+type User struct {
+ // +optional
+ Password string `json:"password,omitempty" toml:"password"`
+ // +optional
+ Profile string `json:"profile,omitempty" toml:"profile,omitempty"`
+ // +optional
+ Quota string `json:"quota,omitempty" toml:"quota,omitempty"`
+ // +optional
+ Networks *Networks `json:"networks,omitempty" toml:"networks,omitempty"`
+}
+
+// Networks is the configuration of [users.readonly.networks] section.
+// +k8s:openapi-gen=false
+type Networks struct {
+ // +optional
+ IP string `json:"ip,omitempty" toml:"ip,omitempty"`
+}
+
+// FlashQuota is the configuration of [quotas] section.
+// +k8s:openapi-gen=false
+type FlashQuota struct {
+ // +optional
+ Default *Quota `json:"default,omitempty" toml:"default,omitempty"`
+}
+
+// Quota is the configuration of [quotas.default] section.
+// +k8s:openapi-gen=false
+type Quota struct {
+ // +optional
+ Interval *Interval `json:"interval,omitempty" toml:"interval,omitempty"`
+}
+
+// Interval is the configuration of [quotas.default.interval] section.
+// +k8s:openapi-gen=false
+type Interval struct {
+ // Optional: Defaults to 3600
+ // +optional
+ Duration *int32 `json:"duration,omitempty" toml:"duration,omitempty"`
+ // Optional: Defaults to 0
+ // +optional
+ Queries *int32 `json:"queries,omitempty" toml:"queries,omitempty"`
+ // Optional: Defaults to 0
+ // +optional
+ Errors *int32 `json:"errors,omitempty" toml:"errors,omitempty"`
+ // Optional: Defaults to 0
+ // +optional
+ ResultRows *int32 `json:"result_rows,omitempty" toml:"result_rows,omitempty"`
+ // Optional: Defaults to 0
+ // +optional
+ ReadRows *int32 `json:"read_rows,omitempty" toml:"read_rows,omitempty"`
+ // Optional: Defaults to 0
+ // +optional
+ ExecutionTime *int32 `json:"execution_time,omitempty" toml:"execution_time,omitempty"`
+}
+
+// FlashStatus is the configuration of [status] section.
+// +k8s:openapi-gen=false
+type FlashStatus struct {
+ // Optional: Defaults to 8234
+ // +optional
+ MetricsPort *int32 `json:"metrics_port,omitempty" toml:"metrics_port,omitempty"`
+}
+
+// FlashRaft is the configuration of [raft] section.
+// +k8s:openapi-gen=false
+type FlashRaft struct {
+ // +optional
+ PDAddr string `json:"pd_addr,omitempty" toml:"pd_addr,omitempty"`
+ // Optional: Defaults to /data0/kvstore
+ // +optional
+ KVStorePath string `json:"kvstore_path,omitempty" toml:"kvstore_path,omitempty"`
+ // Optional: Defaults to dt
+ // +optional
+ StorageEngine string `json:"storage_engine,omitempty" toml:"storage_engine,omitempty"`
+}
+
+// FlashApplication is the configuration of [application] section.
+// +k8s:openapi-gen=false
+type FlashApplication struct {
+ // Optional: Defaults to true
+ // +optional
+ RunAsDaemon *bool `json:"runAsDaemon,omitempty" toml:"runAsDaemon,omitempty"`
+}
+
+// FlashLogger is the configuration of [logger] section.
+// +k8s:openapi-gen=true
+type FlashLogger struct {
+ // Optional: Defaults to /data0/logs/error.log
+ // +optional
+ // +k8s:openapi-gen=false
+ ErrorLog string `json:"errorlog,omitempty" toml:"errorlog,omitempty"`
+ // Optional: Defaults to 100M
+ // +optional
+ Size string `json:"size,omitempty" toml:"size,omitempty"`
+ // Optional: Defaults to /data0/logs/server.log
+ // +optional
+ // +k8s:openapi-gen=false
+ ServerLog string `json:"log,omitempty" toml:"log,omitempty"`
+ // Optional: Defaults to information
+ // +optional
+ Level string `json:"level,omitempty" toml:"level,omitempty"`
+ // Optional: Defaults to 10
+ // +optional
+ Count *int32 `json:"count,omitempty" toml:"count,omitempty"`
+}
+
+// Flash is the configuration of [flash] section.
+// +k8s:openapi-gen=true
+type Flash struct {
+ // +optional
+ // +k8s:openapi-gen=false
+ TiDBStatusAddr string `json:"tidb_status_addr,omitempty" toml:"tidb_status_addr,omitempty"`
+ // +optional
+ // +k8s:openapi-gen=false
+ ServiceAddr string `json:"service_addr,omitempty" toml:"service_addr,omitempty"`
+ // Optional: Defaults to 0.6
+ // +optional
+ OverlapThreshold *float64 `json:"overlap_threshold,omitempty" toml:"overlap_threshold,omitempty"`
+ // Optional: Defaults to 200
+ // +optional
+ CompactLogMinPeriod *int32 `json:"compact_log_min_period,omitempty" toml:"compact_log_min_period,omitempty"`
+ // +optional
+ FlashCluster *FlashCluster `json:"flash_cluster,omitempty" toml:"flash_cluster,omitempty"`
+ // +optional
+ // +k8s:openapi-gen=false
+ FlashProxy *FlashProxy `json:"proxy,omitempty" toml:"proxy,omitempty"`
+}
+
+// FlashCluster is the configuration of [flash.flash_cluster] section.
+// +k8s:openapi-gen=true
+type FlashCluster struct {
+ // Optional: Defaults to /tiflash/flash_cluster_manager
+ // +optional
+ // +k8s:openapi-gen=false
+ ClusterManagerPath string `json:"cluster_manager_path,omitempty" toml:"cluster_manager_path,omitempty"`
+ // Optional: Defaults to /data0/logs/flash_cluster_manager.log
+ // +optional
+ // +k8s:openapi-gen=false
+ ClusterLog string `json:"log,omitempty" toml:"log,omitempty"`
+ // Optional: Defaults to 20
+ // +optional
+ RefreshInterval *int32 `json:"refresh_interval,omitempty" toml:"refresh_interval,omitempty"`
+ // Optional: Defaults to 10
+ // +optional
+ UpdateRuleInterval *int32 `json:"update_rule_interval,omitempty" toml:"update_rule_interval,omitempty"`
+ // Optional: Defaults to 60
+ // +optional
+ MasterTTL *int32 `json:"master_ttl,omitempty" toml:"master_ttl,omitempty"`
+}
+
+// FlashProxy is the configuration of [flash.proxy] section.
+// +k8s:openapi-gen=false
+type FlashProxy struct {
+ // Optional: Defaults to 0.0.0.0:20170
+ // +optional
+ Addr string `json:"addr,omitempty" toml:"addr,omitempty"`
+ // +optional
+ AdvertiseAddr string `json:"advertise-addr,omitempty" toml:"advertise-addr,omitempty"`
+ // Optional: Defaults to /data0/proxy
+ // +optional
+ DataDir string `json:"data-dir,omitempty" toml:"data-dir,omitempty"`
+ // Optional: Defaults to /data0/proxy.toml
+ // +optional
+ Config string `json:"config,omitempty" toml:"config,omitempty"`
+ // Optional: Defaults to /data0/logs/proxy.log
+ // +optional
+ LogFile string `json:"log-file,omitempty" toml:"log-file,omitempty"`
+}
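tiflash_config.go introduces the TiFlash configuration surface, split into the TiFlash process config (`config`) and the proxy config (`proxy`). A hedged sketch that fills a few of the common settings using only fields shown above; the values echo the defaults mentioned in the comments:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func int32Ptr(v int32) *int32 { return &v }

func main() {
	cfg := v1alpha1.TiFlashConfig{
		CommonConfig: &v1alpha1.CommonConfig{
			FlashLogger: &v1alpha1.FlashLogger{
				Level: "information", // the documented default level
				Count: int32Ptr(10),
			},
			Flash: &v1alpha1.Flash{
				FlashCluster: &v1alpha1.FlashCluster{
					RefreshInterval: int32Ptr(20),
				},
			},
		},
	}
	out, _ := json.Marshal(cfg)
	// The common config is serialized under the "config" key, matching the CR's json tags.
	fmt.Println(string(out))
}
```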
diff --git a/pkg/apis/pingcap/v1alpha1/tikv_config.go b/pkg/apis/pingcap/v1alpha1/tikv_config.go
index 7aeec356e9..0257367d87 100644
--- a/pkg/apis/pingcap/v1alpha1/tikv_config.go
+++ b/pkg/apis/pingcap/v1alpha1/tikv_config.go
@@ -51,6 +51,8 @@ type TiKVConfig struct {
PD *TiKVPDConfig `json:"pd,omitempty" toml:"pd,omitempty"`
// +optional
Security *TiKVSecurityConfig `json:"security,omitempty" toml:"security,omitempty"`
+ // +optional
+ Encryption *TiKVEncryptionConfig `json:"encryption,omitempty" toml:"encryption,omitempty"`
}
// +k8s:openapi-gen=true
@@ -65,50 +67,50 @@ type TiKVReadPoolConfig struct {
type TiKVStorageReadPoolConfig struct {
// Optional: Defaults to 4
// +optional
- HighConcurrency *int64 `json:"high_concurrency,omitempty" toml:"high_concurrency,omitempty"`
+ HighConcurrency *int64 `json:"high-concurrency,omitempty" toml:"high-concurrency,omitempty"`
// Optional: Defaults to 4
// +optional
- NormalConcurrency *int64 `json:"normal_concurrency,omitempty" toml:"normal_concurrency,omitempty"`
+ NormalConcurrency *int64 `json:"normal-concurrency,omitempty" toml:"normal-concurrency,omitempty"`
// Optional: Defaults to 4
// +optional
- LowConcurrency *int64 `json:"low_concurrency,omitempty" toml:"low_concurrency,omitempty"`
+ LowConcurrency *int64 `json:"low-concurrency,omitempty" toml:"low-concurrency,omitempty"`
// Optional: Defaults to 2000
// +optional
- MaxTasksPerWorkerHigh *int64 `json:"max_tasks_per_worker_high,omitempty" toml:"max_tasks_per_worker_high,omitempty"`
+ MaxTasksPerWorkerHigh *int64 `json:"max-tasks-per-worker-high,omitempty" toml:"max-tasks-per-worker-high,omitempty"`
// Optional: Defaults to 2000
// +optional
- MaxTasksPerWorkerNormal *int64 `json:"max_tasks_per_worker_normal,omitempty" toml:"max_tasks_per_worker_normal,omitempty"`
+ MaxTasksPerWorkerNormal *int64 `json:"max-tasks-per-worker-normal,omitempty" toml:"max-tasks-per-worker-normal,omitempty"`
// Optional: Defaults to 2000
// +optional
- MaxTasksPerWorkerLow *int64 `json:"max_tasks_per_worker_low,omitempty" toml:"max_tasks_per_worker_low,omitempty"`
+ MaxTasksPerWorkerLow *int64 `json:"max-tasks-per-worker-low,omitempty" toml:"max-tasks-per-worker-low,omitempty"`
// Optional: Defaults to 10MB
// +optional
- StackSize string `json:"stack_size,omitempty" toml:"stack_size,omitempty"`
+ StackSize string `json:"stack-size,omitempty" toml:"stack-size,omitempty"`
}
// +k8s:openapi-gen=true
type TiKVCoprocessorReadPoolConfig struct {
// Optional: Defaults to 8
// +optional
- HighConcurrency *int64 `json:"high_concurrency,omitempty" toml:"high_concurrency,omitempty"`
+ HighConcurrency *int64 `json:"high-concurrency,omitempty" toml:"high-concurrency,omitempty"`
// Optional: Defaults to 8
// +optional
- NormalConcurrency *int64 `json:"normal_concurrency,omitempty" toml:"normal_concurrency,omitempty"`
+ NormalConcurrency *int64 `json:"normal-concurrency,omitempty" toml:"normal-concurrency,omitempty"`
// Optional: Defaults to 8
// +optional
- LowConcurrency *int64 `json:"low_concurrency,omitempty" toml:"low_concurrency,omitempty"`
+ LowConcurrency *int64 `json:"low-concurrency,omitempty" toml:"low-concurrency,omitempty"`
// Optional: Defaults to 2000
// +optional
- MaxTasksPerWorkerHigh *int64 `json:"max_tasks_per_worker_high,omitempty" toml:"max_tasks_per_worker_high,omitempty"`
+ MaxTasksPerWorkerHigh *int64 `json:"max-tasks-per-worker-high,omitempty" toml:"max-tasks-per-worker-high,omitempty"`
// Optional: Defaults to 2000
// +optional
- MaxTasksPerWorkerNormal *int64 `json:"max_tasks_per_worker_normal,omitempty" toml:"max_tasks_per_worker_normal,omitempty"`
+ MaxTasksPerWorkerNormal *int64 `json:"max-tasks-per-worker-normal,omitempty" toml:"max-tasks-per-worker-normal,omitempty"`
// Optional: Defaults to 2000
// +optional
- MaxTasksPerWorkerLow *int64 `json:"max_tasks_per_worker_low,omitempty" toml:"max_tasks_per_worker_low,omitempty"`
+ MaxTasksPerWorkerLow *int64 `json:"max-tasks-per-worker-low,omitempty" toml:"max-tasks-per-worker-low,omitempty"`
// Optional: Defaults to 10MB
// +optional
- StackSize string `json:"stack_size,omitempty" toml:"stack_size,omitempty"`
+ StackSize string `json:"stack-size,omitempty" toml:"stack-size,omitempty"`
}
// +k8s:openapi-gen=true
@@ -123,70 +125,70 @@ type TiKVPDConfig struct {
// Default is 300ms.
// Optional: Defaults to 300ms
// +optional
- RetryInterval string `json:"retry_interval,omitempty" toml:"retry_interval,omitempty"`
+ RetryInterval string `json:"retry-interval,omitempty" toml:"retry-interval,omitempty"`
// The maximum number of times to retry a PD connection initialization.
//
// Default is isize::MAX, represented by -1.
// Optional: Defaults to -1
// +optional
- RetryMaxCount *int64 `json:"retry_max_count,omitempty" toml:"retry_max_count,omitempty"`
+ RetryMaxCount *int64 `json:"retry-max-count,omitempty" toml:"retry-max-count,omitempty"`
// If the client observes the same error message on retry, it can repeat the message only
// every `n` times.
//
// Default is 10. Set to 1 to disable this feature.
// Optional: Defaults to 10
// +optional
- RetryLogEvery *int64 `json:"retry_log_every,omitempty" toml:"retry_log_every,omitempty"`
+ RetryLogEvery *int64 `json:"retry-log-every,omitempty" toml:"retry-log-every,omitempty"`
}
// +k8s:openapi-gen=true
type TiKVRaftDBConfig struct {
// +optional
- WalRecoveryMode string `json:"wal_recovery_mode,omitempty" toml:"wal_recovery_mode,omitempty"`
+ WalRecoveryMode string `json:"wal-recovery-mode,omitempty" toml:"wal-recovery-mode,omitempty"`
// +optional
- WalDir string `json:"wal_dir,omitempty" toml:"wal_dir,omitempty"`
+ WalDir string `json:"wal-dir,omitempty" toml:"wal-dir,omitempty"`
// +optional
- WalTtlSeconds *int64 `json:"wal_ttl_seconds,omitempty" toml:"wal_ttl_seconds,omitempty"`
+ WalTtlSeconds *int64 `json:"wal-ttl-seconds,omitempty" toml:"wal-ttl-seconds,omitempty"`
// +optional
- WalSizeLimit string `json:"wal_size_limit,omitempty" toml:"wal_size_limit,omitempty"`
+ WalSizeLimit string `json:"wal-size-limit,omitempty" toml:"wal-size-limit,omitempty"`
// +optional
- MaxTotalWalSize string `json:"max_total_wal_size,omitempty" toml:"max_total_wal_size,omitempty"`
+ MaxTotalWalSize string `json:"max-total-wal-size,omitempty" toml:"max-total-wal-size,omitempty"`
// +optional
- MaxBackgroundJobs *int64 `json:"max_background_jobs,omitempty" toml:"max_background_jobs,omitempty"`
+ MaxBackgroundJobs *int64 `json:"max-background-jobs,omitempty" toml:"max-background-jobs,omitempty"`
// +optional
- MaxManifestFileSize string `json:"max_manifest_file_size,omitempty" toml:"max_manifest_file_size,omitempty"`
+ MaxManifestFileSize string `json:"max-manifest-file-size,omitempty" toml:"max-manifest-file-size,omitempty"`
// +optional
- CreateIfMissing *bool `json:"create_if_missing,omitempty" toml:"create_if_missing,omitempty"`
+ CreateIfMissing *bool `json:"create-if-missing,omitempty" toml:"create-if-missing,omitempty"`
// +optional
- MaxOpenFiles *int64 `json:"max_open_files,omitempty" toml:"max_open_files,omitempty"`
+ MaxOpenFiles *int64 `json:"max-open-files,omitempty" toml:"max-open-files,omitempty"`
// +optional
- EnableStatistics *bool `json:"enable_statistics,omitempty" toml:"enable_statistics,omitempty"`
+ EnableStatistics *bool `json:"enable-statistics,omitempty" toml:"enable-statistics,omitempty"`
// +optional
- StatsDumpPeriod string `json:"stats_dump_period,omitempty" toml:"stats_dump_period,omitempty"`
+ StatsDumpPeriod string `json:"stats-dump-period,omitempty" toml:"stats-dump-period,omitempty"`
// +optional
- CompactionReadaheadSize string `json:"compaction_readahead_size,omitempty" toml:"compaction_readahead_size,omitempty"`
+ CompactionReadaheadSize string `json:"compaction-readahead-size,omitempty" toml:"compaction-readahead-size,omitempty"`
// +optional
- InfoLogMaxSize string `json:"info_log_max_size,omitempty" toml:"info_log_max_size,omitempty"`
+ InfoLogMaxSize string `json:"info-log-max-size,omitempty" toml:"info-log-max-size,omitempty"`
// +optional
- FnfoLogRollTime string `json:"info_log_roll_time,omitempty" toml:"info_log_roll_time,omitempty"`
+ FnfoLogRollTime string `json:"info-log-roll-time,omitempty" toml:"info-log-roll-time,omitempty"`
// +optional
- InfoLogKeepLogFileNum *int64 `json:"info_log_keep_log_file_num,omitempty" toml:"info_log_keep_log_file_num,omitempty"`
+ InfoLogKeepLogFileNum *int64 `json:"info-log-keep-log-file-num,omitempty" toml:"info-log-keep-log-file-num,omitempty"`
// +optional
- InfoLogDir string `json:"info_log_dir,omitempty" toml:"info_log_dir,omitempty"`
+ InfoLogDir string `json:"info-log-dir,omitempty" toml:"info-log-dir,omitempty"`
// +optional
- MaxSubCompactions *int64 `json:"max_sub_compactions,omitempty" toml:"max_sub_compactions,omitempty"`
+ MaxSubCompactions *int64 `json:"max-sub-compactions,omitempty" toml:"max-sub-compactions,omitempty"`
// +optional
- WritableFileMaxBufferSize string `json:"writable_file_max_buffer_size,omitempty" toml:"writable_file_max_buffer_size,omitempty"`
+ WritableFileMaxBufferSize string `json:"writable-file-max-buffer-size,omitempty" toml:"writable-file-max-buffer-size,omitempty"`
// +optional
- UseDirectIoForFlushAndCompaction *bool `json:"use_direct_io_for_flush_and_compaction,omitempty" toml:"use_direct_io_for_flush_and_compaction,omitempty"`
+ UseDirectIoForFlushAndCompaction *bool `json:"use-direct-io-for-flush-and-compaction,omitempty" toml:"use-direct-io-for-flush-and-compaction,omitempty"`
// +optional
- EnablePipelinedWrite *bool `json:"enable_pipelined_write,omitempty" toml:"enable_pipelined_write,omitempty"`
+ EnablePipelinedWrite *bool `json:"enable-pipelined-write,omitempty" toml:"enable-pipelined-write,omitempty"`
// +optional
- AllowConcurrentMemtableWrite *bool `json:"allow_concurrent_memtable_write,omitempty" toml:"allow_concurrent_memtable_write,omitempty"`
+ AllowConcurrentMemtableWrite *bool `json:"allow-concurrent-memtable-write,omitempty" toml:"allow-concurrent-memtable-write,omitempty"`
// +optional
- BytesPerSync string `json:"bytes_per_sync,omitempty" toml:"bytes_per_sync,omitempty"`
+ BytesPerSync string `json:"bytes-per-sync,omitempty" toml:"bytes-per-sync,omitempty"`
// +optional
- WalBytesPerSync string `json:"wal_bytes_per_sync,omitempty" toml:"wal_bytes_per_sync,omitempty"`
+ WalBytesPerSync string `json:"wal-bytes-per-sync,omitempty" toml:"wal-bytes-per-sync,omitempty"`
// +optional
Defaultcf *TiKVCfConfig `json:"defaultcf,omitempty" toml:"defaultcf,omitempty"`
}
@@ -194,46 +196,50 @@ type TiKVRaftDBConfig struct {
// +k8s:openapi-gen=true
type TiKVSecurityConfig struct {
// +optional
- CAPath string `json:"ca_path,omitempty" toml:"ca_path,omitempty"`
+ CAPath string `json:"ca-path,omitempty" toml:"ca-path,omitempty"`
+ // +optional
+ CertPath string `json:"cert-path,omitempty" toml:"cert-path,omitempty"`
// +optional
- CertPath string `json:"cert_path,omitempty" toml:"cert_path,omitempty"`
+ KeyPath string `json:"key-path,omitempty" toml:"key-path,omitempty"`
+ // CertAllowedCN is the Common Name that is allowed
// +optional
- KeyPath string `json:"key_path,omitempty" toml:"key_path,omitempty"`
+ // +k8s:openapi-gen=false
+ CertAllowedCN []string `json:"cert-allowed-cn,omitempty" toml:"cert-allowed-cn,omitempty"`
// +optional
- OverrideSslTarget string `json:"override_ssl_target,omitempty" toml:"override_ssl_target,omitempty"`
+ OverrideSslTarget string `json:"override-ssl-target,omitempty" toml:"override-ssl-target,omitempty"`
// +optional
- CipherFile string `json:"cipher_file,omitempty" toml:"cipher_file,omitempty"`
+ CipherFile string `json:"cipher-file,omitempty" toml:"cipher-file,omitempty"`
}
// +k8s:openapi-gen=true
type TiKVImportConfig struct {
// +optional
- ImportDir string `json:"import_dir,omitempty" toml:"import_dir,omitempty"`
+ ImportDir string `json:"import-dir,omitempty" toml:"import-dir,omitempty"`
// +optional
- NumThreads *int64 `json:"num_threads,omitempty" toml:"num_threads,omitempty"`
+ NumThreads *int64 `json:"num-threads,omitempty" toml:"num-threads,omitempty"`
// +optional
- NumImportJobs *int64 `json:"num_import_jobs,omitempty" toml:"num_import_jobs,omitempty"`
+ NumImportJobs *int64 `json:"num-import-jobs,omitempty" toml:"num-import-jobs,omitempty"`
// +optional
- NumImportSstJobs *int64 `json:"num_import_sst_jobs,omitempty" toml:"num_import_sst_jobs,omitempty"`
+ NumImportSstJobs *int64 `json:"num-import-sst-jobs,omitempty" toml:"num-import-sst-jobs,omitempty"`
// +optional
- MaxPrepareDuration string `json:"max_prepare_duration,omitempty" toml:"max_prepare_duration,omitempty"`
+ MaxPrepareDuration string `json:"max-prepare-duration,omitempty" toml:"max-prepare-duration,omitempty"`
// +optional
- RegionSplitSize string `json:"region_split_size,omitempty" toml:"region_split_size,omitempty"`
+ RegionSplitSize string `json:"region-split-size,omitempty" toml:"region-split-size,omitempty"`
// +optional
- StreamChannelWindow *int64 `json:"stream_channel_window,omitempty" toml:"stream_channel_window,omitempty"`
+ StreamChannelWindow *int64 `json:"stream-channel-window,omitempty" toml:"stream-channel-window,omitempty"`
// +optional
- MaxOpenEngines *int64 `json:"max_open_engines,omitempty" toml:"max_open_engines,omitempty"`
+ MaxOpenEngines *int64 `json:"max-open-engines,omitempty" toml:"max-open-engines,omitempty"`
// +optional
- UploadSpeedLimit string `json:"upload_speed_limit,omitempty" toml:"upload_speed_limit,omitempty"`
+ UploadSpeedLimit string `json:"upload-speed-limit,omitempty" toml:"upload-speed-limit,omitempty"`
}
// +k8s:openapi-gen=true
type TiKVGCConfig struct {
// +optional
// Optional: Defaults to 512
- BatchKeys *int64 `json:" batch_keys,omitempty" toml:" batch_keys,omitempty"`
+ BatchKeys *int64 `json:" batch-keys,omitempty" toml:" batch-keys,omitempty"`
// +optional
- MaxWriteBytesPerSec string `json:" max_write_bytes_per_sec,omitempty" toml:" max_write_bytes_per_sec,omitempty"`
+ MaxWriteBytesPerSec string `json:" max-write-bytes-per-sec,omitempty" toml:" max-write-bytes-per-sec,omitempty"`
}
// TiKVDbConfig is the rocksdb config.
@@ -476,7 +482,7 @@ type TiKVServerConfig struct {
GrpcConcurrentStream *uint `json:"grpc-concurrent-stream,omitempty" toml:"grpc-concurrent-stream,omitempty"`
// Optional: Defaults to 32G
// +optional
- GrpcMemoryQuota *string `json:"grpc_memory_pool_quota,omitempty" toml:"grpc_memory_pool_quota,omitempty"`
+ GrpcMemoryQuota *string `json:"grpc-memory-pool-quota,omitempty" toml:"grpc-memory-pool-quota,omitempty"`
// Optional: Defaults to 10
// +optional
GrpcRaftConnNum *uint `json:"grpc-raft-conn-num,omitempty" toml:"grpc-raft-conn-num,omitempty"`
@@ -710,31 +716,104 @@ type TiKVCoprocessorConfig struct {
// optional
BatchSplitLimit *int64 `json:"batch-split-limit,omitempty" toml:"batch-split-limit,omitempty"`
- // When Region [a,e) size exceeds `region_max_size`, it will be split into several Regions [a,b),
- // [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size` (or a
+ // When Region [a,e) size exceeds `region-max-size`, it will be split into several Regions [a,b),
+ // [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region-split-size` (or a
// little larger). See also: region-split-size
// Optional: Defaults to 144MB
// optional
RegionMaxSize string `json:"region-max-size,omitempty" toml:"region-max-size,omitempty"`
- // When Region [a,e) size exceeds `region_max_size`, it will be split into several Regions [a,b),
- // [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region_split_size` (or a
+ // When Region [a,e) size exceeds `region-max-size`, it will be split into several Regions [a,b),
+ // [b,c), [c,d), [d,e) and the size of [a,b), [b,c), [c,d) will be `region-split-size` (or a
// little larger). See also: region-max-size
// Optional: Defaults to 96MB
// optional
RegionSplitSize string `json:"region-split-size,omitempty" toml:"region-split-size,omitempty"`
- // When the number of keys in Region [a,e) exceeds the `region_max_keys`, it will be split into
+ // When the number of keys in Region [a,e) exceeds the `region-max-keys`, it will be split into
// several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be
- // `region_split_keys`. See also: region-split-keys
+ // `region-split-keys`. See also: region-split-keys
// Optional: Defaults to 1440000
// optional
RegionMaxKeys *int64 `json:"region-max-keys,omitempty" toml:"region-max-keys,omitempty"`
- // When the number of keys in Region [a,e) exceeds the `region_max_keys`, it will be split into
+ // When the number of keys in Region [a,e) exceeds the `region-max-keys`, it will be split into
// several Regions [a,b), [b,c), [c,d), [d,e) and the number of keys in [a,b), [b,c), [c,d) will be
- // `region_split_keys`. See also: region-max-keys
+ // `region-split-keys`. See also: region-max-keys
// Optional: Defaults to 960000
// optional
RegionSplitKeys *int64 `json:"region-split-keys,omitempty" toml:"region-split-keys,omitempty"`
}
+
+// +k8s:openapi-gen=true
+type TiKVEncryptionConfig struct {
+ // Encryption method; use the data key to encrypt raw RocksDB data
+ // Possible values: plaintext, aes128-ctr, aes192-ctr, aes256-ctr
+ // Optional: Default to plaintext
+ // optional
+ Method string `json:"method,omitempty" toml:"method,omitempty"`
+
+ // The frequency of data key rotation; it is managed by TiKV
+ // Optional: default to 7d
+ // optional
+ DataKeyRotationPeriod string `json:"data-key-rotation-period,omitempty" toml:"data-key-rotation-period,omitempty"`
+
+ // Master key config
+ MasterKey *TiKVMasterKeyConfig `json:"master-key,omitempty" toml:"master-key,omitempty"`
+
+ // Previous master key config
+ // It is used in master key rotation; the data key should be decrypted by the previous master key and then encrypted by the new master key
+ PreviousMasterKey *TiKVMasterKeyConfig `json:"previous-master-key,omitempty" toml:"previous-master-key,omitempty"`
+}
+
+// +k8s:openapi-gen=true
+type TiKVMasterKeyConfig struct {
+ // Use KMS encryption or use file encryption, possible values: kms, file
+ // If set to kms, MasterKeyKMSConfig should be filled; if set to file, MasterKeyFileConfig should be filled
+ // optional
+ Type string `json:"type,omitempty" toml:"type,omitempty"`
+
+ // Master key file config
+ // If the type is set to file, this config should be filled
+ MasterKeyFileConfig `json:",inline"`
+
+ // Master key KMS config
+ // If the type is set to kms, this config should be filled
+ MasterKeyKMSConfig `json:",inline"`
+}
+
+// +k8s:openapi-gen=true
+type MasterKeyFileConfig struct {
+ // Encryption method; use the master key to encrypt the data key
+ // Possible values: plaintext, aes128-ctr, aes192-ctr, aes256-ctr
+ // Optional: Default to plaintext
+ // optional
+ Method string `json:"method,omitempty" toml:"method,omitempty"`
+
+ // Text file containing the key in hex form, ending with '\n'
+ Path string `json:"path" toml:"path"`
+}
+
+// +k8s:openapi-gen=true
+type MasterKeyKMSConfig struct {
+ // AWS CMK key-id; it can be found in the AWS Console or via the aws CLI
+ // This field is required
+ KeyID string `json:"key-id" toml:"key-id"`
+
+ // AccessKey of AWS user; leave empty if using another authorization method
+ // optional
+ AccessKey string `json:"access-key,omitempty" toml:"access-key,omitempty"`
+
+ // SecretKey of AWS user; leave empty if using another authorization method
+ // optional
+ SecretKey string `json:"secret-access-key,omitempty" toml:"secret-access-key,omitempty"`
+
+ // Region of this KMS key
+ // Optional: Default to us-east-1
+ // optional
+ Region string `json:"region,omitempty" toml:"region,omitempty"`
+
+ // Used for KMS-compatible services such as Ceph or MinIO; if using AWS, leave empty
+ // optional
+ Endpoint string `json:"endpoint,omitempty" toml:"endpoint,omitempty"`
+}
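Not part of the patch: a minimal sketch of how the encryption types above serialize through their TOML tags, mirroring the encoder used by the config tests in this PR. The method, rotation period, and key path are illustrative values, and the exact output layout depends on how the encoder flattens the embedded key configs.

```go
// Sketch only: exercises TiKVEncryptionConfig/TiKVMasterKeyConfig from this
// patch with a file-based master key. Values are made up for illustration.
package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func main() {
	enc := &v1alpha1.TiKVEncryptionConfig{
		Method:                "aes256-ctr",
		DataKeyRotationPeriod: "7d",
		MasterKey: &v1alpha1.TiKVMasterKeyConfig{
			Type: "file",
			MasterKeyFileConfig: v1alpha1.MasterKeyFileConfig{
				Method: "aes256-ctr",
				Path:   "/keys/master.key", // hex key file, see the field comment above
			},
		},
	}

	buf := new(bytes.Buffer)
	if err := toml.NewEncoder(buf).Encode(enc); err != nil {
		panic(err)
	}
	// Expect hyphenated keys such as data-key-rotation-period and a
	// [master-key] table; the exact layout is up to the encoder.
	fmt.Println(buf.String())
}
```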
diff --git a/pkg/apis/pingcap/v1alpha1/tikv_config_test.go b/pkg/apis/pingcap/v1alpha1/tikv_config_test.go
new file mode 100644
index 0000000000..e9841be924
--- /dev/null
+++ b/pkg/apis/pingcap/v1alpha1/tikv_config_test.go
@@ -0,0 +1,71 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+
+ "github.com/BurntSushi/toml"
+ . "github.com/onsi/gomega"
+ "k8s.io/utils/pointer"
+)
+
+func TestTiKVConfig(t *testing.T) {
+ g := NewGomegaWithT(t)
+ c := &TiKVConfig{
+ ReadPool: &TiKVReadPoolConfig{
+ Storage: &TiKVStorageReadPoolConfig{
+ HighConcurrency: pointer.Int64Ptr(4),
+ },
+ Coprocessor: &TiKVCoprocessorReadPoolConfig{
+ HighConcurrency: pointer.Int64Ptr(8),
+ },
+ },
+ Storage: &TiKVStorageConfig{
+ BlockCache: &TiKVBlockCacheConfig{
+ Shared: pointer.BoolPtr(true),
+ },
+ },
+ }
+ jsonStr, err := json.Marshal(c)
+ g.Expect(err).To(Succeed())
+ g.Expect(jsonStr).NotTo(ContainSubstring("port"), "Expected empty fields to be omitted")
+ var jsonUnmarshaled TiKVConfig
+ err = json.Unmarshal(jsonStr, &jsonUnmarshaled)
+ g.Expect(err).To(Succeed())
+ g.Expect(&jsonUnmarshaled).To(Equal(c))
+
+ buff := new(bytes.Buffer)
+ encoder := toml.NewEncoder(buff)
+ err = encoder.Encode(c)
+ g.Expect(err).To(Succeed())
+ tStr := buff.String()
+ g.Expect(tStr).To((Equal(`[storage]
+ [storage.block-cache]
+ shared = true
+
+[readpool]
+ [readpool.coprocessor]
+ high-concurrency = 8
+ [readpool.storage]
+ high-concurrency = 4
+`)))
+
+ var tUnmarshaled TiKVConfig
+ err = toml.Unmarshal([]byte(tStr), &tUnmarshaled)
+ g.Expect(err).To(Succeed())
+ g.Expect(&tUnmarshaled).To(Equal(c))
+}
diff --git a/pkg/apis/pingcap/v1alpha1/types.go b/pkg/apis/pingcap/v1alpha1/types.go
index 9d68257a3d..3579491a89 100644
--- a/pkg/apis/pingcap/v1alpha1/types.go
+++ b/pkg/apis/pingcap/v1alpha1/types.go
@@ -42,6 +42,8 @@ const (
TiDBMemberType MemberType = "tidb"
// TiKVMemberType is tikv container type
TiKVMemberType MemberType = "tikv"
+ // TiFlashMemberType is tiflash container type
+ TiFlashMemberType MemberType = "tiflash"
// SlowLogTailerMemberType is tidb log tailer container type
SlowLogTailerMemberType MemberType = "slowlog"
// UnknownMemberType is unknown container type
@@ -111,6 +113,10 @@ type TidbClusterSpec struct {
// TiKV cluster spec
TiKV TiKVSpec `json:"tikv"`
+ // TiFlash cluster spec
+ // +optional
+ TiFlash *TiFlashSpec `json:"tiflash,omitempty"`
+
// Pump cluster spec
// +optional
Pump *PumpSpec `json:"pump,omitempty"`
@@ -119,6 +125,11 @@ type TidbClusterSpec struct {
// +optional
Helper *HelperSpec `json:"helper,omitempty"`
+ // Indicates that the tidb cluster is paused and will not be processed by
+ // the controller.
+ // +optional
+ Paused bool `json:"paused,omitempty"`
+
// TODO: remove optional after defaulting logic introduced
// TiDB cluster version
// +optional
@@ -150,10 +161,10 @@ type TidbClusterSpec struct {
// +optional
EnablePVReclaim *bool `json:"enablePVReclaim,omitempty"`
- // Enable TLS connection between TiDB server components
- // Optional: Defaults to false
+ // Whether to enable the TLS connection between TiDB server components
+ // Optional: Defaults to nil
// +optional
- EnableTLSCluster *bool `json:"enableTLSCluster,omitempty"`
+ TLSCluster *TLSCluster `json:"tlsCluster,omitempty"`
// Whether Hostnetwork is enabled for TiDB cluster Pods
// Optional: Defaults to false
@@ -177,7 +188,7 @@ type TidbClusterSpec struct {
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
- // Base tolerations of TiDB cluster Pods, components may add more tolreations upon this respectively
+ // Base tolerations of TiDB cluster Pods, components may add more tolerations upon this respectively
// +optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
@@ -194,11 +205,12 @@ type TidbClusterSpec struct {
// TidbClusterStatus represents the current status of a tidb cluster.
type TidbClusterStatus struct {
- ClusterID string `json:"clusterID,omitempty"`
- PD PDStatus `json:"pd,omitempty"`
- TiKV TiKVStatus `json:"tikv,omitempty"`
- TiDB TiDBStatus `json:"tidb,omitempty"`
- Pump PumpStatus `josn:"pump,omitempty"`
+ ClusterID string `json:"clusterID,omitempty"`
+ PD PDStatus `json:"pd,omitempty"`
+ TiKV TiKVStatus `json:"tikv,omitempty"`
+ TiDB TiDBStatus `json:"tidb,omitempty"`
+ Pump PumpStatus `json:"pump,omitempty"`
+ TiFlash TiFlashStatus `json:"tiflash,omitempty"`
}
// +k8s:openapi-gen=true
@@ -222,6 +234,12 @@ type PDSpec struct {
// +optional
Service *ServiceSpec `json:"service,omitempty"`
+ // MaxFailoverCount limits the max replicas that can be added in failover; 0 means no failover.
+ // Optional: Defaults to 3
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MaxFailoverCount *int32 `json:"maxFailoverCount,omitempty"`
+
// The storageClassName of the persistent volume for PD data storage.
// Defaults to Kubernetes default storage class.
// +optional
@@ -238,6 +256,9 @@ type TiKVSpec struct {
ComponentSpec `json:",inline"`
corev1.ResourceRequirements `json:",inline"`
+ // Specify a Service Account for tikv
+ ServiceAccount string `json:"serviceAccount,omitempty"`
+
// The desired ready replicas
// +kubebuilder:validation:Minimum=1
Replicas int32 `json:"replicas"`
@@ -254,8 +275,8 @@ type TiKVSpec struct {
// +optional
Privileged *bool `json:"privileged,omitempty"`
- // MaxFailoverCount limit the max replicas could be added in failover, 0 means unlimited
- // Optional: Defaults to 0
+ // MaxFailoverCount limits the max replicas that can be added in failover; 0 means no failover
+ // Optional: Defaults to 3
// +kubebuilder:validation:Minimum=0
// +optional
MaxFailoverCount *int32 `json:"maxFailoverCount,omitempty"`
@@ -270,6 +291,68 @@ type TiKVSpec struct {
Config *TiKVConfig `json:"config,omitempty"`
}
+// TiFlashSpec contains details of TiFlash members
+// +k8s:openapi-gen=true
+type TiFlashSpec struct {
+ ComponentSpec `json:",inline"`
+ corev1.ResourceRequirements `json:",inline"`
+
+ // Specify a Service Account for TiFlash
+ ServiceAccount string `json:"serviceAccount,omitempty"`
+
+ // The desired ready replicas
+ // +kubebuilder:validation:Minimum=1
+ Replicas int32 `json:"replicas"`
+
+ // Base image of the component, image tag is now allowed during validation
+ // +kubebuilder:default=pingcap/tiflash
+ // +optional
+ BaseImage string `json:"baseImage"`
+
+ // Whether to create the TiFlash container in privileged mode; it is highly discouraged to enable this in
+ // critical environments.
+ // Optional: defaults to false
+ // +optional
+ Privileged *bool `json:"privileged,omitempty"`
+
+ // MaxFailoverCount limits the max replicas that can be added in failover; 0 means no failover
+ // Optional: Defaults to 3
+ // +kubebuilder:validation:Minimum=0
+ // +optional
+ MaxFailoverCount *int32 `json:"maxFailoverCount,omitempty"`
+
+ // The persistent volume claims of the TiFlash data storages.
+ // TiFlash supports multiple disks.
+ StorageClaims []StorageClaim `json:"storageClaims"`
+
+ // Config is the Configuration of TiFlash
+ // +optional
+ Config *TiFlashConfig `json:"config,omitempty"`
+
+ // LogTailer is the configuration of the log tailer sidecars for TiFlash
+ // +optional
+ LogTailer *LogTailerSpec `json:"logTailer,omitempty"`
+}
+
+// +k8s:openapi-gen=true
+// LogTailerSpec represents an optional log tailer sidecar container
+type LogTailerSpec struct {
+ corev1.ResourceRequirements `json:",inline"`
+}
+
+// +k8s:openapi-gen=true
+// StorageClaim contains details of TiFlash storages
+type StorageClaim struct {
+ // Resources represents the minimum resources the volume should have.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ // +optional
+ Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+ // Name of the StorageClass required by the claim.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ // +optional
+ StorageClassName *string `json:"storageClassName,omitempty"`
+}
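Not part of the patch: a rough sketch of how the new TiFlashSpec and StorageClaim fields fit together, assuming the package path used throughout this diff. The replica count, storage class name, and storage size are placeholder values.

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/pointer"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func main() {
	sc := "local-storage" // hypothetical StorageClass name
	tiflash := &v1alpha1.TiFlashSpec{
		Replicas:         3,
		BaseImage:        "pingcap/tiflash",
		MaxFailoverCount: pointer.Int32Ptr(3),
		// TiFlash supports multiple disks: one StorageClaim per data directory.
		StorageClaims: []v1alpha1.StorageClaim{
			{
				StorageClassName: &sc,
				Resources: corev1.ResourceRequirements{
					Requests: corev1.ResourceList{
						corev1.ResourceStorage: resource.MustParse("100Gi"),
					},
				},
			},
		},
	}

	out, err := json.MarshalIndent(tiflash, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```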
+
// +k8s:openapi-gen=true
// TiDBSpec contains details of TiDB members
type TiDBSpec struct {
@@ -296,8 +379,8 @@ type TiDBSpec struct {
// +optional
BinlogEnabled *bool `json:"binlogEnabled,omitempty"`
- // MaxFailoverCount limit the max replicas could be added in failover, 0 means unlimited
- // Optional: Defaults to 0
+ // MaxFailoverCount limits the max replicas that can be added in failover; 0 means no failover
+ // Optional: Defaults to 3
// +kubebuilder:validation:Minimum=0
// +optional
MaxFailoverCount *int32 `json:"maxFailoverCount,omitempty"`
@@ -308,9 +391,9 @@ type TiDBSpec struct {
SeparateSlowLog *bool `json:"separateSlowLog,omitempty"`
// Whether enable the TLS connection between the SQL client and TiDB server
- // Optional: Defaults to false
+ // Optional: Defaults to nil
// +optional
- EnableTLSClient *bool `json:"enableTLSClient,omitempty"`
+ TLSClient *TiDBTLSClient `json:"tlsClient,omitempty"`
// The spec of the slow log tailer sidecar
// +optional
@@ -447,6 +530,24 @@ type ComponentSpec struct {
// Optional: Defaults to cluster-level setting
// +optional
ConfigUpdateStrategy *ConfigUpdateStrategy `json:"configUpdateStrategy,omitempty"`
+
+ // List of environment variables to set in the container, like
+ // v1.Container.Env.
+ // Note that the following env names cannot be used and may be overridden by
+ // tidb-operator built-in envs.
+ // - NAMESPACE
+ // - TZ
+ // - SERVICE_NAME
+ // - PEER_SERVICE_NAME
+ // - HEADLESS_SERVICE_NAME
+ // - SET_NAME
+ // - HOSTNAME
+ // - CLUSTER_NAME
+ // - POD_NAME
+ // - BINLOG_ENABLED
+ // - SLOW_LOG_FILE
+ // +optional
+ Env []corev1.EnvVar `json:"env,omitempty"`
}
// +k8s:openapi-gen=true
@@ -505,6 +606,7 @@ type PDStatus struct {
Leader PDMember `json:"leader,omitempty"`
FailureMembers map[string]PDFailureMember `json:"failureMembers,omitempty"`
UnjoinedMembers map[string]UnjoinedMember `json:"unjoinedMembers,omitempty"`
+ Image string `json:"image,omitempty"`
}
// PDMember is PD member
@@ -542,6 +644,7 @@ type TiDBStatus struct {
Members map[string]TiDBMember `json:"members,omitempty"`
FailureMembers map[string]TiDBFailureMember `json:"failureMembers,omitempty"`
ResignDDLOwnerRetryCount int32 `json:"resignDDLOwnerRetryCount,omitempty"`
+ Image string `json:"image,omitempty"`
}
// TiDBMember is TiDB member
@@ -568,6 +671,18 @@ type TiKVStatus struct {
Stores map[string]TiKVStore `json:"stores,omitempty"`
TombstoneStores map[string]TiKVStore `json:"tombstoneStores,omitempty"`
FailureStores map[string]TiKVFailureStore `json:"failureStores,omitempty"`
+ Image string `json:"image,omitempty"`
+}
+
+// TiFlashStatus is TiFlash status
+type TiFlashStatus struct {
+ Synced bool `json:"synced,omitempty"`
+ Phase MemberPhase `json:"phase,omitempty"`
+ StatefulSet *apps.StatefulSetStatus `json:"statefulSet,omitempty"`
+ Stores map[string]TiKVStore `json:"stores,omitempty"`
+ TombstoneStores map[string]TiKVStore `json:"tombstoneStores,omitempty"`
+ FailureStores map[string]TiKVFailureStore `json:"failureStores,omitempty"`
+ Image string `json:"image,omitempty"`
}
// TiKVStores is either Up/Down/Offline/Tombstone
@@ -596,6 +711,55 @@ type PumpStatus struct {
StatefulSet *apps.StatefulSetStatus `json:"statefulSet,omitempty"`
}
+// TiDBTLSClient can enable TLS connection between TiDB server and MySQL client
+type TiDBTLSClient struct {
+ // When enabled, TiDB will accept TLS encrypted connections from MySQL client
+ // The steps to enable this feature:
+ // 1. Generate a TiDB server-side certificate and a client-side certificate for the TiDB cluster.
+ // There are multiple ways to generate certificates:
+ // - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/enable-tls-clients/
+ // - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+ // - or use cert-manager signed certificates: https://cert-manager.io/
+ // 2. Create a K8s Secret object which contains the TiDB server-side certificate created above.
+ // The name of this Secret must be: <clusterName>-tidb-server-secret.
+ // kubectl create secret generic <clusterName>-tidb-server-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+ // 3. Create a K8s Secret object which contains the TiDB client-side certificate created above, which will be used by TiDB Operator.
+ // The name of this Secret must be: <clusterName>-tidb-client-secret.
+ // kubectl create secret generic <clusterName>-tidb-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+ // 4. Set Enabled to `true`.
+ // +optional
+ Enabled bool `json:"enabled,omitempty"`
+ // Specify a secret of client cert for backup/restore
+ // Optional: Defaults to <clusterName>-tidb-client-secret
+ // +optional
+ // If you want to specify a secret for backup/restore, generate a Secret object according to the third step of the above procedure; the difference is that the Secret name can be freely defined. Then copy the Secret name to TLSSecret.
+ // This field only works in the backup/restore process.
+ TLSSecret string `json:"tlsSecret,omitempty"`
+}
+
+// TLSCluster can enable TLS connection between TiDB server components
+// https://pingcap.com/docs/stable/how-to/secure/enable-tls-between-components/
+type TLSCluster struct {
+ // Enable mutual TLS authentication among TiDB components
+ // Once enabled, the mutual authentication applies to all components,
+ // and it does not support applying to only part of the components.
+ // The steps to enable this feature:
+ // 1. Generate certificates for the TiDB server components and a client-side certificate for them.
+ // There are multiple ways to generate these certificates:
+ // - user-provided certificates: https://pingcap.com/docs/stable/how-to/secure/generate-self-signed-certificates/
+ // - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
+ // - or use cert-manager signed certificates: https://cert-manager.io/
+ // 2. Create one secret object for one component which contains the certificates created above.
+ // The name of this Secret must be: <clusterName>-<componentName>-cluster-secret.
+ // For PD: kubectl create secret generic <clusterName>-pd-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+ // For TiKV: kubectl create secret generic <clusterName>-tikv-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+ // For TiDB: kubectl create secret generic <clusterName>-tidb-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+ // For Client: kubectl create secret generic <clusterName>-cluster-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
+ // Same for other components.
+ // +optional
+ Enabled bool `json:"enabled,omitempty"`
+}
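Not part of the patch: a minimal sketch of wiring the new TLS fields into a TidbClusterSpec. It assumes the cluster and client Secrets described in the comments above already exist; only the spec fields introduced in this diff are set.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func main() {
	spec := v1alpha1.TidbClusterSpec{
		// Mutual TLS among PD/TiKV/TiDB, backed by the per-component
		// <clusterName>-<componentName>-cluster-secret Secrets.
		TLSCluster: &v1alpha1.TLSCluster{Enabled: true},
		TiDB: v1alpha1.TiDBSpec{
			// TLS between MySQL clients and TiDB, backed by
			// <clusterName>-tidb-server-secret.
			TLSClient: &v1alpha1.TiDBTLSClient{Enabled: true},
		},
	}

	out, err := json.MarshalIndent(&spec, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```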
+
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -674,7 +838,7 @@ type S3StorageProvider struct {
Acl string `json:"acl,omitempty"`
// SecretName is the name of secret which stores
// S3 compliant storage access key and secret key.
- SecretName string `json:"secretName"`
+ SecretName string `json:"secretName,omitempty"`
// Prefix for the keys.
Prefix string `json:"prefix,omitempty"`
// SSE Sever-Side Encryption.
@@ -730,6 +894,10 @@ type TiDBAccessConfig struct {
User string `json:"user,omitempty"`
// SecretName is the name of secret which stores tidb cluster's password.
SecretName string `json:"secretName"`
+ // Whether to enable the TLS connection between the SQL client and TiDB server
+ // Optional: Defaults to nil
+ // +optional
+ TLSClient *TiDBTLSClient `json:"tlsClient,omitempty"`
}
// +k8s:openapi-gen=true
@@ -739,6 +907,10 @@ type BackupSpec struct {
From TiDBAccessConfig `json:"from,omitempty"`
// Type is the backup type for tidb cluster.
Type BackupType `json:"backupType,omitempty"`
+ // TikvGCLifeTime is to specify the safe gc life time for backup.
+ // The time limit during which data is retained for each GC, in the format of Go Duration.
+ // When a GC happens, the current time minus this value is the safe point.
+ TikvGCLifeTime *string `json:"tikvGCLifeTime,omitempty"`
// StorageProvider configures where and how backups should be stored.
StorageProvider `json:",inline"`
// The storageClassName of the persistent volume for Backup data storage.
@@ -749,23 +921,29 @@ type BackupSpec struct {
StorageSize string `json:"storageSize,omitempty"`
// BRConfig is the configs for BR
BR *BRConfig `json:"br,omitempty"`
+ // Base tolerations of backup Pods, components may add more tolerations upon this respectively
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+ // Affinity of backup Pods
+ // +optional
+ Affinity *corev1.Affinity `json:"affinity,omitempty"`
+ // Use KMS to decrypt the secrets
+ UseKMS bool `json:"useKMS,omitempty"`
+ // Specify service account of backup
+ ServiceAccount string `json:"serviceAccount,omitempty"`
}
// +k8s:openapi-gen=true
// BRConfig contains config for BR
type BRConfig struct {
- // PDAddress is the PD address of the tidb cluster
- PDAddress string `json:"pd"`
+ // ClusterName of backup/restore cluster
+ Cluster string `json:"cluster"`
+ // Namespace of backup/restore cluster
+ ClusterNamespace string `json:"clusterNamespace,omitempty"`
// DB is the specific DB which will be backed-up or restored
DB string `json:"db,omitempty"`
// Table is the specific table which will be backed-up or restored
Table string `json:"table,omitempty"`
- // CA is the CA certificate path for TLS connection
- CA string `json:"ca,omitempty"`
- // Cert is the certificate path for TLS connection
- Cert string `json:"cert,omitempty"`
- // Key is the private key path for TLS connection
- Key string `json:"key,omitempty"`
// LogLevel is the log level
LogLevel string `json:"logLevel,omitempty"`
// StatusAddr is the HTTP listening address for the status report service. Set to empty string to disable
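Not part of the patch: a small sketch of a BackupSpec under the new BRConfig shape, where BR references the TidbCluster by name and namespace instead of a raw PD address. The cluster name, namespace, GC life time, and ServiceAccount are placeholder values.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func main() {
	gcLifeTime := "72h" // any Go duration string
	backup := &v1alpha1.BackupSpec{
		Type:           "full",
		TikvGCLifeTime: &gcLifeTime,
		UseKMS:         true,
		ServiceAccount: "tidb-backup-manager", // hypothetical ServiceAccount
		BR: &v1alpha1.BRConfig{
			// PDAddress is gone; BR now resolves PD from the referenced cluster.
			Cluster:          "basic",
			ClusterNamespace: "tidb-cluster",
		},
	}

	out, err := json.MarshalIndent(backup, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```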
@@ -949,6 +1127,10 @@ type RestoreSpec struct {
To TiDBAccessConfig `json:"to,omitempty"`
// Type is the backup type for tidb cluster.
Type BackupType `json:"backupType,omitempty"`
+ // TikvGCLifeTime is to specify the safe gc life time for restore.
+ // The time limit during which data is retained for each GC, in the format of Go Duration.
+ // When a GC happens, the current time minus this value is the safe point.
+ TikvGCLifeTime *string `json:"tikvGCLifeTime,omitempty"`
// StorageProvider configures where and how backups should be stored.
StorageProvider `json:",inline"`
// The storageClassName of the persistent volume for Restore data storage.
@@ -959,6 +1141,16 @@ type RestoreSpec struct {
StorageSize string `json:"storageSize,omitempty"`
// BR is the configs for BR.
BR *BRConfig `json:"br,omitempty"`
+ // Base tolerations of restore Pods, components may add more tolerations upon this respectively
+ // +optional
+ Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+ // Affinity of restore Pods
+ // +optional
+ Affinity *corev1.Affinity `json:"affinity,omitempty"`
+ // Use KMS to decrypt the secrets
+ UseKMS bool `json:"useKMS,omitempty"`
+ // Specify service account of restore
+ ServiceAccount string `json:"serviceAccount,omitempty"`
}
// RestoreStatus represents the current status of a tidb cluster restore.
diff --git a/pkg/apis/pingcap/v1alpha1/validation/validation.go b/pkg/apis/pingcap/v1alpha1/validation/validation.go
index 728f6b80fc..5f5fd799a3 100644
--- a/pkg/apis/pingcap/v1alpha1/validation/validation.go
+++ b/pkg/apis/pingcap/v1alpha1/validation/validation.go
@@ -16,11 +16,15 @@ package validation
import (
"encoding/json"
"fmt"
+ "os"
"reflect"
+ "strings"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/label"
+ corev1 "k8s.io/api/core/v1"
apivalidation "k8s.io/apimachinery/pkg/api/validation"
+ "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
)
@@ -31,13 +35,230 @@ func ValidateTidbCluster(tc *v1alpha1.TidbCluster) field.ErrorList {
// validate metadata
fldPath := field.NewPath("metadata")
// validate metadata/annotations
- allErrs = append(allErrs, apivalidation.ValidateAnnotations(tc.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+ allErrs = append(allErrs, validateAnnotations(tc.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
+ // validate spec
+ allErrs = append(allErrs, validateTiDBClusterSpec(&tc.Spec, field.NewPath("spec"))...)
+ return allErrs
+}
+
+func validateAnnotations(anns map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, apivalidation.ValidateAnnotations(anns, fldPath)...)
for _, key := range []string{label.AnnPDDeleteSlots, label.AnnTiDBDeleteSlots, label.AnnTiKVDeleteSlots} {
- allErrs = append(allErrs, validateDeleteSlots(tc.ObjectMeta.Annotations, key, fldPath.Child("annotations", key))...)
+ allErrs = append(allErrs, validateDeleteSlots(anns, key, fldPath.Child(key))...)
+ }
+ return allErrs
+}
+
+func validateTiDBClusterSpec(spec *v1alpha1.TidbClusterSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, validatePDSpec(&spec.PD, fldPath.Child("pd"))...)
+ allErrs = append(allErrs, validateTiKVSpec(&spec.TiKV, fldPath.Child("tikv"))...)
+ allErrs = append(allErrs, validateTiDBSpec(&spec.TiDB, fldPath.Child("tidb"))...)
+ if spec.Pump != nil {
+ allErrs = append(allErrs, validatePumpSpec(spec.Pump, fldPath.Child("pump"))...)
+ }
+ if spec.TiFlash != nil {
+ allErrs = append(allErrs, validateTiFlashSpec(spec.TiFlash, fldPath.Child("tiflash"))...)
+ }
+ return allErrs
+}
+
+func validatePDSpec(spec *v1alpha1.PDSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, validateComponentSpec(&spec.ComponentSpec, fldPath)...)
+ return allErrs
+}
+
+func validateTiKVSpec(spec *v1alpha1.TiKVSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, validateComponentSpec(&spec.ComponentSpec, fldPath)...)
+ return allErrs
+}
+
+func validateTiFlashSpec(spec *v1alpha1.TiFlashSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, validateComponentSpec(&spec.ComponentSpec, fldPath)...)
+ allErrs = append(allErrs, validateTiFlashConfig(spec.Config, fldPath)...)
+ if len(spec.StorageClaims) < 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("spec.StorageClaims"),
+ spec.StorageClaims, "storageClaims should be configured at least one item."))
+ }
+ return allErrs
+}
+
+func validateTiFlashConfig(config *v1alpha1.TiFlashConfig, path *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if config == nil {
+ return allErrs
+ }
+
+ if config.CommonConfig != nil {
+ if config.CommonConfig.Flash != nil {
+ if config.CommonConfig.Flash.OverlapThreshold != nil {
+ if *config.CommonConfig.Flash.OverlapThreshold < 0 || *config.CommonConfig.Flash.OverlapThreshold > 1 {
+ allErrs = append(allErrs, field.Invalid(path.Child("config.config.flash.overlap_threshold"),
+ config.CommonConfig.Flash.OverlapThreshold,
+ "overlap_threshold must be in the range of [0,1]."))
+ }
+ }
+ if config.CommonConfig.Flash.FlashCluster != nil {
+ if config.CommonConfig.Flash.FlashCluster.ClusterLog != "" {
+ splitPath := strings.Split(config.CommonConfig.Flash.FlashCluster.ClusterLog, string(os.PathSeparator))
+ // The log path should be at least /dir/base.log
+ if len(splitPath) < 3 {
+ allErrs = append(allErrs, field.Invalid(path.Child("config.config.flash.flash_cluster.log"),
+ config.CommonConfig.Flash.FlashCluster.ClusterLog,
+ "log path should include at least one level dir."))
+ }
+ }
+ }
+ if config.CommonConfig.Flash.FlashProxy != nil {
+ if config.CommonConfig.Flash.FlashProxy.LogFile != "" {
+ splitPath := strings.Split(config.CommonConfig.Flash.FlashProxy.LogFile, string(os.PathSeparator))
+ // The log path should be at least /dir/base.log
+ if len(splitPath) < 3 {
+ allErrs = append(allErrs, field.Invalid(path.Child("config.config.flash.flash_proxy.log-file"),
+ config.CommonConfig.Flash.FlashProxy.LogFile,
+ "log path should include at least one level dir."))
+ }
+ }
+ }
+ }
+ if config.CommonConfig.FlashLogger != nil {
+ if config.CommonConfig.FlashLogger.ServerLog != "" {
+ splitPath := strings.Split(config.CommonConfig.FlashLogger.ServerLog, string(os.PathSeparator))
+ // The log path should be at least /dir/base.log
+ if len(splitPath) < 3 {
+ allErrs = append(allErrs, field.Invalid(path.Child("config.config.logger.log"),
+ config.CommonConfig.FlashLogger.ServerLog,
+ "log path should include at least one level dir."))
+ }
+ }
+ if config.CommonConfig.FlashLogger.ErrorLog != "" {
+ splitPath := strings.Split(config.CommonConfig.FlashLogger.ErrorLog, string(os.PathSeparator))
+ // The log path should be at least /dir/base.log
+ if len(splitPath) < 3 {
+ allErrs = append(allErrs, field.Invalid(path.Child("config.config.logger.errorlog"),
+ config.CommonConfig.FlashLogger.ErrorLog,
+ "log path should include at least one level dir."))
+ }
+ }
+ }
}
return allErrs
}
+func validateTiDBSpec(spec *v1alpha1.TiDBSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, validateComponentSpec(&spec.ComponentSpec, fldPath)...)
+ return allErrs
+}
+
+func validatePumpSpec(spec *v1alpha1.PumpSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ allErrs = append(allErrs, validateComponentSpec(&spec.ComponentSpec, fldPath)...)
+ return allErrs
+}
+
+func validateComponentSpec(spec *v1alpha1.ComponentSpec, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ // TODO validate other fields
+ allErrs = append(allErrs, validateEnv(spec.Env, fldPath.Child("env"))...)
+ return allErrs
+}
+
+// validateEnv validates env vars
+func validateEnv(vars []corev1.EnvVar, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for i, ev := range vars {
+ idxPath := fldPath.Index(i)
+ if len(ev.Name) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
+ } else {
+ for _, msg := range validation.IsEnvVarName(ev.Name) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg))
+ }
+ }
+ allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...)
+ }
+ return allErrs
+}
+
+func validateEnvVarValueFrom(ev corev1.EnvVar, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if ev.ValueFrom == nil {
+ return allErrs
+ }
+
+ numSources := 0
+
+ if ev.ValueFrom.FieldRef != nil {
+ numSources++
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldRef"), "", "fieldRef is not supported"))
+ }
+ if ev.ValueFrom.ResourceFieldRef != nil {
+ numSources++
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceFieldRef"), "", "resourceFieldRef is not supported"))
+ }
+ if ev.ValueFrom.ConfigMapKeyRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...)
+ }
+ if ev.ValueFrom.SecretKeyRef != nil {
+ numSources++
+ allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...)
+ }
+
+ if numSources == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `configMapKeyRef` or `secretKeyRef`"))
+ } else if len(ev.Value) != 0 {
+ if numSources != 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty"))
+ }
+ } else if numSources > 1 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time"))
+ }
+
+ return allErrs
+}
+
+func validateConfigMapKeySelector(s *corev1.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for _, msg := range apivalidation.NameIsDNSSubdomain(s.Name, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg))
+ }
+ if len(s.Key) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+ } else {
+ for _, msg := range validation.IsConfigMapKey(s.Key) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
+ }
+ }
+
+ return allErrs
+}
+
+func validateSecretKeySelector(s *corev1.SecretKeySelector, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ for _, msg := range apivalidation.NameIsDNSSubdomain(s.Name, false) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg))
+ }
+ if len(s.Key) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("key"), ""))
+ } else {
+ for _, msg := range validation.IsConfigMapKey(s.Key) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg))
+ }
+ }
+
+ return allErrs
+}
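Not part of the patch: a small test-style sketch of the env validation added above, assuming it sits in the same validation package. It checks that a plain value passes while a fieldRef source is rejected.

```go
package validation

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func TestValidateEnvSketch(t *testing.T) {
	env := []corev1.EnvVar{
		// Plain values pass: the name only has to be a valid environment
		// variable name.
		{Name: "MY_FLAG", Value: "on"},
		// fieldRef sources are rejected by validateEnvVarValueFrom.
		{Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"},
		}},
	}

	errs := validateEnv(env, field.NewPath("spec", "tidb", "env"))
	if len(errs) != 1 {
		t.Fatalf("expected exactly one error (fieldRef is not supported), got %d: %v", len(errs), errs)
	}
}
```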
+
// ValidateCreateTidbCLuster validates a newly created TidbCluster
func ValidateCreateTidbCluster(tc *v1alpha1.TidbCluster) field.ErrorList {
allErrs := field.ErrorList{}
@@ -88,15 +309,6 @@ func validateNewTidbClusterSpec(spec *v1alpha1.TidbClusterSpec, path *field.Path
if spec.PD.Image != "" {
allErrs = append(allErrs, field.Invalid(path.Child("pd.image"), spec.PD.Image, "image has been deprecated, use baseImage instead"))
}
- if spec.TiDB.Config == nil {
- allErrs = append(allErrs, field.Invalid(path.Child("tidb.config"), spec.TiDB.Config, "tidb.config must not be nil"))
- }
- if spec.TiKV.Config == nil {
- allErrs = append(allErrs, field.Invalid(path.Child("tikv.config"), spec.TiKV.Config, "tidb.config must not be nil"))
- }
- if spec.PD.Config == nil {
- allErrs = append(allErrs, field.Invalid(path.Child("pd.config"), spec.PD.Config, "tidb.config must not be nil"))
- }
return allErrs
}
@@ -135,6 +347,12 @@ func validateUpdatePDConfig(old, conf *v1alpha1.PDConfig, path *field.Path) fiel
if old == nil || conf == nil {
return allErrs
}
+
+ if conf.Security != nil && len(conf.Security.CertAllowedCN) > 1 {
+ allErrs = append(allErrs, field.Invalid(path.Child("security.cert-allowed-cn"), conf.Security.CertAllowedCN,
+ "Only one CN is currently supported"))
+ }
+
if !reflect.DeepEqual(old.Schedule, conf.Schedule) {
allErrs = append(allErrs, field.Invalid(path.Child("schedule"), conf.Schedule,
"PD Schedule Config is immutable through CRD, please modify with pd-ctl instead."))
diff --git a/pkg/apis/pingcap/v1alpha1/validation/validation_test.go b/pkg/apis/pingcap/v1alpha1/validation/validation_test.go
index fccae35014..1324d7bbec 100644
--- a/pkg/apis/pingcap/v1alpha1/validation/validation_test.go
+++ b/pkg/apis/pingcap/v1alpha1/validation/validation_test.go
@@ -23,8 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
)
-// TODO: more UTs
-func TestValidateDeletedSlots(t *testing.T) {
+func TestValidateAnnotations(t *testing.T) {
successCases := []struct {
name string
tc v1alpha1.TidbCluster
@@ -83,7 +82,7 @@ func TestValidateDeletedSlots(t *testing.T) {
}
for _, v := range successCases {
- if errs := ValidateTidbCluster(&v.tc); len(errs) != 0 {
+ if errs := validateAnnotations(v.tc.ObjectMeta.Annotations, field.NewPath("metadata", "annotations")); len(errs) != 0 {
t.Errorf("[%s]: unexpected error: %v", v.name, errs)
}
}
@@ -160,7 +159,7 @@ func TestValidateDeletedSlots(t *testing.T) {
}
for _, v := range errorCases {
- errs := ValidateTidbCluster(&v.tc)
+ errs := validateAnnotations(v.tc.ObjectMeta.Annotations, field.NewPath("metadata", "annotations"))
if len(errs) != len(v.errs) {
t.Errorf("[%s]: expected %d failures, got %d failures: %v", v.name, len(v.errs), len(errs), errs)
continue
diff --git a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
index ceb14c194d..f0d0944c49 100644
--- a/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go
@@ -264,7 +264,12 @@ func (in *BackupScheduleStatus) DeepCopy() *BackupScheduleStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
*out = *in
- out.From = in.From
+ in.From.DeepCopyInto(&out.From)
+ if in.TikvGCLifeTime != nil {
+ in, out := &in.TikvGCLifeTime, &out.TikvGCLifeTime
+ *out = new(string)
+ **out = **in
+ }
in.StorageProvider.DeepCopyInto(&out.StorageProvider)
if in.StorageClassName != nil {
in, out := &in.StorageClassName, &out.StorageClassName
@@ -276,6 +281,18 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
*out = new(BRConfig)
(*in).DeepCopyInto(*out)
}
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]v1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Affinity != nil {
+ in, out := &in.Affinity, &out.Affinity
+ *out = new(v1.Affinity)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -339,6 +356,21 @@ func (in *BasicAutoScalerSpec) DeepCopyInto(out *BasicAutoScalerSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.MetricsTimeDuration != nil {
+ in, out := &in.MetricsTimeDuration, &out.MetricsTimeDuration
+ *out = new(string)
+ **out = **in
+ }
+ if in.ScaleOutThreshold != nil {
+ in, out := &in.ScaleOutThreshold, &out.ScaleOutThreshold
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ScaleInThreshold != nil {
+ in, out := &in.ScaleInThreshold, &out.ScaleInThreshold
+ *out = new(int32)
+ **out = **in
+ }
return
}
@@ -352,9 +384,44 @@ func (in *BasicAutoScalerSpec) DeepCopy() *BasicAutoScalerSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BasicAutoScalerStatus) DeepCopyInto(out *BasicAutoScalerStatus) {
+ *out = *in
+ if in.MetricsStatusList != nil {
+ in, out := &in.MetricsStatusList, &out.MetricsStatusList
+ *out = make([]MetricsStatus, len(*in))
+ copy(*out, *in)
+ }
+ if in.RecommendedReplicas != nil {
+ in, out := &in.RecommendedReplicas, &out.RecommendedReplicas
+ *out = new(int32)
+ **out = **in
+ }
+ if in.LastAutoScalingTimestamp != nil {
+ in, out := &in.LastAutoScalingTimestamp, &out.LastAutoScalingTimestamp
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAutoScalerStatus.
+func (in *BasicAutoScalerStatus) DeepCopy() *BasicAutoScalerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BasicAutoScalerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Binlog) DeepCopyInto(out *Binlog) {
*out = *in
+ if in.Enable != nil {
+ in, out := &in.Enable, &out.Enable
+ *out = new(bool)
+ **out = **in
+ }
if in.WriteTimeout != nil {
in, out := &in.WriteTimeout, &out.WriteTimeout
*out = new(string)
@@ -388,6 +455,92 @@ func (in *Binlog) DeepCopy() *Binlog {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CommonConfig) DeepCopyInto(out *CommonConfig) {
+ *out = *in
+ if in.PathRealtimeMode != nil {
+ in, out := &in.PathRealtimeMode, &out.PathRealtimeMode
+ *out = new(bool)
+ **out = **in
+ }
+ if in.MarkCacheSize != nil {
+ in, out := &in.MarkCacheSize, &out.MarkCacheSize
+ *out = new(int64)
+ **out = **in
+ }
+ if in.MinmaxIndexCacheSize != nil {
+ in, out := &in.MinmaxIndexCacheSize, &out.MinmaxIndexCacheSize
+ *out = new(int64)
+ **out = **in
+ }
+ if in.TCPPort != nil {
+ in, out := &in.TCPPort, &out.TCPPort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.HTTPPort != nil {
+ in, out := &in.HTTPPort, &out.HTTPPort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.InternalServerHTTPPort != nil {
+ in, out := &in.InternalServerHTTPPort, &out.InternalServerHTTPPort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Flash != nil {
+ in, out := &in.Flash, &out.Flash
+ *out = new(Flash)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FlashLogger != nil {
+ in, out := &in.FlashLogger, &out.FlashLogger
+ *out = new(FlashLogger)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FlashApplication != nil {
+ in, out := &in.FlashApplication, &out.FlashApplication
+ *out = new(FlashApplication)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FlashRaft != nil {
+ in, out := &in.FlashRaft, &out.FlashRaft
+ *out = new(FlashRaft)
+ **out = **in
+ }
+ if in.FlashStatus != nil {
+ in, out := &in.FlashStatus, &out.FlashStatus
+ *out = new(FlashStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FlashQuota != nil {
+ in, out := &in.FlashQuota, &out.FlashQuota
+ *out = new(FlashQuota)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FlashUser != nil {
+ in, out := &in.FlashUser, &out.FlashUser
+ *out = new(FlashUser)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FlashProfile != nil {
+ in, out := &in.FlashProfile, &out.FlashProfile
+ *out = new(FlashProfile)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonConfig.
+func (in *CommonConfig) DeepCopy() *CommonConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(CommonConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) {
*out = *in
@@ -452,6 +605,13 @@ func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) {
*out = new(ConfigUpdateStrategy)
**out = **in
}
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]v1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
@@ -465,6 +625,42 @@ func (in *ComponentSpec) DeepCopy() *ComponentSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoprocessorCache) DeepCopyInto(out *CoprocessorCache) {
+ *out = *in
+ if in.Enabled != nil {
+ in, out := &in.Enabled, &out.Enabled
+ *out = new(bool)
+ **out = **in
+ }
+ if in.CapacityMB != nil {
+ in, out := &in.CapacityMB, &out.CapacityMB
+ *out = new(float64)
+ **out = **in
+ }
+ if in.AdmissionMaxResultMB != nil {
+ in, out := &in.AdmissionMaxResultMB, &out.AdmissionMaxResultMB
+ *out = new(float64)
+ **out = **in
+ }
+ if in.AdmissionMinProcessMs != nil {
+ in, out := &in.AdmissionMinProcessMs, &out.AdmissionMinProcessMs
+ *out = new(uint64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoprocessorCache.
+func (in *CoprocessorCache) DeepCopy() *CoprocessorCache {
+ if in == nil {
+ return nil
+ }
+ out := new(CoprocessorCache)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrdKind) DeepCopyInto(out *CrdKind) {
*out = *in
@@ -514,6 +710,22 @@ func (in *CrdKinds) DeepCopy() *CrdKinds {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DashboardConfig) DeepCopyInto(out *DashboardConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardConfig.
+func (in *DashboardConfig) DeepCopy() *DashboardConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(DashboardConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataResource) DeepCopyInto(out *DataResource) {
*out = *in
@@ -578,9 +790,55 @@ func (in *DataResourceList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Experimental) DeepCopyInto(out *Experimental) {
+ *out = *in
+ if in.AllowAutoRandom != nil {
+ in, out := &in.AllowAutoRandom, &out.AllowAutoRandom
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Experimental.
+func (in *Experimental) DeepCopy() *Experimental {
+ if in == nil {
+ return nil
+ }
+ out := new(Experimental)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileLogConfig) DeepCopyInto(out *FileLogConfig) {
*out = *in
+ if in.Filename != nil {
+ in, out := &in.Filename, &out.Filename
+ *out = new(string)
+ **out = **in
+ }
+ if in.LogRotate != nil {
+ in, out := &in.LogRotate, &out.LogRotate
+ *out = new(bool)
+ **out = **in
+ }
+ if in.MaxSize != nil {
+ in, out := &in.MaxSize, &out.MaxSize
+ *out = new(int)
+ **out = **in
+ }
+ if in.MaxDays != nil {
+ in, out := &in.MaxDays, &out.MaxDays
+ *out = new(int)
+ **out = **in
+ }
+ if in.MaxBackups != nil {
+ in, out := &in.MaxBackups, &out.MaxBackups
+ *out = new(int)
+ **out = **in
+ }
return
}
@@ -594,6 +852,258 @@ func (in *FileLogConfig) DeepCopy() *FileLogConfig {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Flash) DeepCopyInto(out *Flash) {
+ *out = *in
+ if in.OverlapThreshold != nil {
+ in, out := &in.OverlapThreshold, &out.OverlapThreshold
+ *out = new(float64)
+ **out = **in
+ }
+ if in.CompactLogMinPeriod != nil {
+ in, out := &in.CompactLogMinPeriod, &out.CompactLogMinPeriod
+ *out = new(int32)
+ **out = **in
+ }
+ if in.FlashCluster != nil {
+ in, out := &in.FlashCluster, &out.FlashCluster
+ *out = new(FlashCluster)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.FlashProxy != nil {
+ in, out := &in.FlashProxy, &out.FlashProxy
+ *out = new(FlashProxy)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flash.
+func (in *Flash) DeepCopy() *Flash {
+ if in == nil {
+ return nil
+ }
+ out := new(Flash)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashApplication) DeepCopyInto(out *FlashApplication) {
+ *out = *in
+ if in.RunAsDaemon != nil {
+ in, out := &in.RunAsDaemon, &out.RunAsDaemon
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashApplication.
+func (in *FlashApplication) DeepCopy() *FlashApplication {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashApplication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashCluster) DeepCopyInto(out *FlashCluster) {
+ *out = *in
+ if in.RefreshInterval != nil {
+ in, out := &in.RefreshInterval, &out.RefreshInterval
+ *out = new(int32)
+ **out = **in
+ }
+ if in.UpdateRuleInterval != nil {
+ in, out := &in.UpdateRuleInterval, &out.UpdateRuleInterval
+ *out = new(int32)
+ **out = **in
+ }
+ if in.MasterTTL != nil {
+ in, out := &in.MasterTTL, &out.MasterTTL
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashCluster.
+func (in *FlashCluster) DeepCopy() *FlashCluster {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashCluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashLogger) DeepCopyInto(out *FlashLogger) {
+ *out = *in
+ if in.Count != nil {
+ in, out := &in.Count, &out.Count
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashLogger.
+func (in *FlashLogger) DeepCopy() *FlashLogger {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashLogger)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashProfile) DeepCopyInto(out *FlashProfile) {
+ *out = *in
+ if in.Readonly != nil {
+ in, out := &in.Readonly, &out.Readonly
+ *out = new(Profile)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Default != nil {
+ in, out := &in.Default, &out.Default
+ *out = new(Profile)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashProfile.
+func (in *FlashProfile) DeepCopy() *FlashProfile {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashProfile)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashProxy) DeepCopyInto(out *FlashProxy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashProxy.
+func (in *FlashProxy) DeepCopy() *FlashProxy {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashProxy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashQuota) DeepCopyInto(out *FlashQuota) {
+ *out = *in
+ if in.Default != nil {
+ in, out := &in.Default, &out.Default
+ *out = new(Quota)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashQuota.
+func (in *FlashQuota) DeepCopy() *FlashQuota {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashQuota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashRaft) DeepCopyInto(out *FlashRaft) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashRaft.
+func (in *FlashRaft) DeepCopy() *FlashRaft {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashRaft)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashServerConfig) DeepCopyInto(out *FlashServerConfig) {
+ *out = *in
+ in.TiKVServerConfig.DeepCopyInto(&out.TiKVServerConfig)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashServerConfig.
+func (in *FlashServerConfig) DeepCopy() *FlashServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashStatus) DeepCopyInto(out *FlashStatus) {
+ *out = *in
+ if in.MetricsPort != nil {
+ in, out := &in.MetricsPort, &out.MetricsPort
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashStatus.
+func (in *FlashStatus) DeepCopy() *FlashStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlashUser) DeepCopyInto(out *FlashUser) {
+ *out = *in
+ if in.Readonly != nil {
+ in, out := &in.Readonly, &out.Readonly
+ *out = new(User)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Default != nil {
+ in, out := &in.Default, &out.Default
+ *out = new(User)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlashUser.
+func (in *FlashUser) DeepCopy() *FlashUser {
+ if in == nil {
+ return nil
+ }
+ out := new(FlashUser)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GcsStorageProvider) DeepCopyInto(out *GcsStorageProvider) {
*out = *in
@@ -685,6 +1195,73 @@ func (in *InitializerSpec) DeepCopy() *InitializerSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Interval) DeepCopyInto(out *Interval) {
+ *out = *in
+ if in.Duration != nil {
+ in, out := &in.Duration, &out.Duration
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Queries != nil {
+ in, out := &in.Queries, &out.Queries
+ *out = new(int32)
+ **out = **in
+ }
+ if in.Errors != nil {
+ in, out := &in.Errors, &out.Errors
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ResultRows != nil {
+ in, out := &in.ResultRows, &out.ResultRows
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ReadRows != nil {
+ in, out := &in.ReadRows, &out.ReadRows
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ExecutionTime != nil {
+ in, out := &in.ExecutionTime, &out.ExecutionTime
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Interval.
+func (in *Interval) DeepCopy() *Interval {
+ if in == nil {
+ return nil
+ }
+ out := new(Interval)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IsolationRead) DeepCopyInto(out *IsolationRead) {
+ *out = *in
+ if in.Engines != nil {
+ in, out := &in.Engines, &out.Engines
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsolationRead.
+func (in *IsolationRead) DeepCopy() *IsolationRead {
+ if in == nil {
+ return nil
+ }
+ out := new(IsolationRead)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Log) DeepCopyInto(out *Log) {
*out = *in
@@ -703,9 +1280,29 @@ func (in *Log) DeepCopyInto(out *Log) {
*out = new(bool)
**out = **in
}
+ if in.EnableTimestamp != nil {
+ in, out := &in.EnableTimestamp, &out.EnableTimestamp
+ *out = new(bool)
+ **out = **in
+ }
+ if in.EnableErrorStack != nil {
+ in, out := &in.EnableErrorStack, &out.EnableErrorStack
+ *out = new(bool)
+ **out = **in
+ }
if in.File != nil {
in, out := &in.File, &out.File
*out = new(FileLogConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EnableSlowLog != nil {
+ in, out := &in.EnableSlowLog, &out.EnableSlowLog
+ *out = new(bool)
+ **out = **in
+ }
+ if in.SlowQueryFile != nil {
+ in, out := &in.SlowQueryFile, &out.SlowQueryFile
+ *out = new(string)
**out = **in
}
if in.SlowThreshold != nil {
@@ -723,6 +1320,11 @@ func (in *Log) DeepCopyInto(out *Log) {
*out = new(uint64)
**out = **in
}
+ if in.RecordPlanInSlowLog != nil {
+ in, out := &in.RecordPlanInSlowLog, &out.RecordPlanInSlowLog
+ *out = new(uint32)
+ **out = **in
+ }
return
}
@@ -731,7 +1333,72 @@ func (in *Log) DeepCopy() *Log {
if in == nil {
return nil
}
- out := new(Log)
+ out := new(Log)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LogTailerSpec) DeepCopyInto(out *LogTailerSpec) {
+ *out = *in
+ in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogTailerSpec.
+func (in *LogTailerSpec) DeepCopy() *LogTailerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(LogTailerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterKeyFileConfig) DeepCopyInto(out *MasterKeyFileConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterKeyFileConfig.
+func (in *MasterKeyFileConfig) DeepCopy() *MasterKeyFileConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(MasterKeyFileConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MasterKeyKMSConfig) DeepCopyInto(out *MasterKeyKMSConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterKeyKMSConfig.
+func (in *MasterKeyKMSConfig) DeepCopy() *MasterKeyKMSConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(MasterKeyKMSConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetricsStatus) DeepCopyInto(out *MetricsStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsStatus.
+func (in *MetricsStatus) DeepCopy() *MetricsStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(MetricsStatus)
in.DeepCopyInto(out)
return out
}
@@ -758,6 +1425,22 @@ func (in *MonitorContainer) DeepCopy() *MonitorContainer {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Networks) DeepCopyInto(out *Networks) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networks.
+func (in *Networks) DeepCopy() *Networks {
+ if in == nil {
+ return nil
+ }
+ out := new(Networks)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OpenTracing) DeepCopyInto(out *OpenTracing) {
*out = *in
@@ -911,7 +1594,7 @@ func (in *PDConfig) DeepCopyInto(out *PDConfig) {
if in.Security != nil {
in, out := &in.Security, &out.Security
*out = new(PDSecurityConfig)
- **out = **in
+ (*in).DeepCopyInto(*out)
}
if in.LabelProperty != nil {
in, out := &in.LabelProperty, &out.LabelProperty
@@ -932,6 +1615,11 @@ func (in *PDConfig) DeepCopyInto(out *PDConfig) {
}
}
}
+ if in.Dashboard != nil {
+ in, out := &in.Dashboard, &out.Dashboard
+ *out = new(DashboardConfig)
+ **out = **in
+ }
return
}
@@ -1003,7 +1691,7 @@ func (in *PDLogConfig) DeepCopyInto(out *PDLogConfig) {
if in.File != nil {
in, out := &in.File, &out.File
*out = new(FileLogConfig)
- **out = **in
+ (*in).DeepCopyInto(*out)
}
if in.Development != nil {
in, out := &in.Development, &out.Development
@@ -1142,7 +1830,7 @@ func (in *PDReplicationConfig) DeepCopyInto(out *PDReplicationConfig) {
}
if in.LocationLabels != nil {
in, out := &in.LocationLabels, &out.LocationLabels
- *out = make(StringSlice, len(*in))
+ *out = make([]string, len(*in))
copy(*out, *in)
}
if in.StrictlyMatchLabel != nil {
@@ -1150,6 +1838,11 @@ func (in *PDReplicationConfig) DeepCopyInto(out *PDReplicationConfig) {
*out = new(bool)
**out = **in
}
+ if in.EnablePlacementRules != nil {
+ in, out := &in.EnablePlacementRules, &out.EnablePlacementRules
+ *out = new(bool)
+ **out = **in
+ }
return
}
@@ -1277,6 +1970,23 @@ func (in *PDScheduleConfig) DeepCopyInto(out *PDScheduleConfig) {
}
}
}
+ if in.SchedulersPayload != nil {
+ in, out := &in.SchedulersPayload, &out.SchedulersPayload
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.EnableOneWayMerge != nil {
+ in, out := &in.EnableOneWayMerge, &out.EnableOneWayMerge
+ *out = new(bool)
+ **out = **in
+ }
+ if in.EnableCrossTableMerge != nil {
+ in, out := &in.EnableCrossTableMerge, &out.EnableCrossTableMerge
+ *out = new(bool)
+ **out = **in
+ }
return
}
@@ -1341,6 +2051,11 @@ func (in PDSchedulerConfigs) DeepCopy() PDSchedulerConfigs {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PDSecurityConfig) DeepCopyInto(out *PDSecurityConfig) {
*out = *in
+ if in.CertAllowedCN != nil {
+ in, out := &in.CertAllowedCN, &out.CertAllowedCN
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
return
}
@@ -1362,6 +2077,11 @@ func (in *PDServerConfig) DeepCopyInto(out *PDServerConfig) {
*out = new(bool)
**out = **in
}
+ if in.MetricStorage != nil {
+ in, out := &in.MetricStorage, &out.MetricStorage
+ *out = new(string)
+ **out = **in
+ }
return
}
@@ -1385,6 +2105,11 @@ func (in *PDSpec) DeepCopyInto(out *PDSpec) {
*out = new(ServiceSpec)
(*in).DeepCopyInto(*out)
}
+ if in.MaxFailoverCount != nil {
+ in, out := &in.MaxFailoverCount, &out.MaxFailoverCount
+ *out = new(int32)
+ **out = **in
+ }
if in.StorageClassName != nil {
in, out := &in.StorageClassName, &out.StorageClassName
*out = new(string)
@@ -1500,26 +2225,11 @@ func (in *Performance) DeepCopyInto(out *Performance) {
*out = new(uint64)
**out = **in
}
- if in.TCPKeepAlive != nil {
- in, out := &in.TCPKeepAlive, &out.TCPKeepAlive
- *out = new(bool)
- **out = **in
- }
- if in.CrossJoin != nil {
- in, out := &in.CrossJoin, &out.CrossJoin
- *out = new(bool)
- **out = **in
- }
if in.StatsLease != nil {
in, out := &in.StatsLease, &out.StatsLease
*out = new(string)
**out = **in
}
- if in.RunAutoAnalyze != nil {
- in, out := &in.RunAutoAnalyze, &out.RunAutoAnalyze
- *out = new(bool)
- **out = **in
- }
if in.StmtCountLimit != nil {
in, out := &in.StmtCountLimit, &out.StmtCountLimit
*out = new(uint)
@@ -1550,13 +2260,28 @@ func (in *Performance) DeepCopyInto(out *Performance) {
*out = new(string)
**out = **in
}
- if in.TxnEntryCountLimit != nil {
- in, out := &in.TxnEntryCountLimit, &out.TxnEntryCountLimit
+ if in.TxnTotalSizeLimit != nil {
+ in, out := &in.TxnTotalSizeLimit, &out.TxnTotalSizeLimit
*out = new(uint64)
**out = **in
}
- if in.TxnTotalSizeLimit != nil {
- in, out := &in.TxnTotalSizeLimit, &out.TxnTotalSizeLimit
+ if in.TCPKeepAlive != nil {
+ in, out := &in.TCPKeepAlive, &out.TCPKeepAlive
+ *out = new(bool)
+ **out = **in
+ }
+ if in.CrossJoin != nil {
+ in, out := &in.CrossJoin, &out.CrossJoin
+ *out = new(bool)
+ **out = **in
+ }
+ if in.RunAutoAnalyze != nil {
+ in, out := &in.RunAutoAnalyze, &out.RunAutoAnalyze
+ *out = new(bool)
+ **out = **in
+ }
+ if in.TxnEntryCountLimit != nil {
+ in, out := &in.TxnEntryCountLimit, &out.TxnEntryCountLimit
*out = new(uint64)
**out = **in
}
@@ -1687,6 +2412,42 @@ func (in *PreparedPlanCache) DeepCopy() *PreparedPlanCache {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Profile) DeepCopyInto(out *Profile) {
+ *out = *in
+ if in.Readonly != nil {
+ in, out := &in.Readonly, &out.Readonly
+ *out = new(int32)
+ **out = **in
+ }
+ if in.MaxMemoryUsage != nil {
+ in, out := &in.MaxMemoryUsage, &out.MaxMemoryUsage
+ *out = new(int64)
+ **out = **in
+ }
+ if in.UseUncompressedCache != nil {
+ in, out := &in.UseUncompressedCache, &out.UseUncompressedCache
+ *out = new(int32)
+ **out = **in
+ }
+ if in.LoadBalancing != nil {
+ in, out := &in.LoadBalancing, &out.LoadBalancing
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Profile.
+func (in *Profile) DeepCopy() *Profile {
+ if in == nil {
+ return nil
+ }
+ out := new(Profile)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) {
*out = *in
@@ -1705,6 +2466,82 @@ func (in *PrometheusSpec) DeepCopy() *PrometheusSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
+ *out = *in
+ if in.PanicWhenUnexpectedKeyOrData != nil {
+ in, out := &in.PanicWhenUnexpectedKeyOrData, &out.PanicWhenUnexpectedKeyOrData
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Server != nil {
+ in, out := &in.Server, &out.Server
+ *out = new(FlashServerConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Storage != nil {
+ in, out := &in.Storage, &out.Storage
+ *out = new(TiKVStorageConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Raftstore != nil {
+ in, out := &in.Raftstore, &out.Raftstore
+ *out = new(TiKVRaftstoreConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Rocksdb != nil {
+ in, out := &in.Rocksdb, &out.Rocksdb
+ *out = new(TiKVDbConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Coprocessor != nil {
+ in, out := &in.Coprocessor, &out.Coprocessor
+ *out = new(TiKVCoprocessorConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ReadPool != nil {
+ in, out := &in.ReadPool, &out.ReadPool
+ *out = new(TiKVReadPoolConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.RaftDB != nil {
+ in, out := &in.RaftDB, &out.RaftDB
+ *out = new(TiKVRaftDBConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Import != nil {
+ in, out := &in.Import, &out.Import
+ *out = new(TiKVImportConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.GC != nil {
+ in, out := &in.GC, &out.GC
+ *out = new(TiKVGCConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PD != nil {
+ in, out := &in.PD, &out.PD
+ *out = new(TiKVPDConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Security != nil {
+ in, out := &in.Security, &out.Security
+ *out = new(TiKVSecurityConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig.
+func (in *ProxyConfig) DeepCopy() *ProxyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxyProtocol) DeepCopyInto(out *ProxyProtocol) {
*out = *in
@@ -1781,6 +2618,27 @@ func (in *PumpStatus) DeepCopy() *PumpStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Quota) DeepCopyInto(out *Quota) {
+ *out = *in
+ if in.Interval != nil {
+ in, out := &in.Interval, &out.Interval
+ *out = new(Interval)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota.
+func (in *Quota) DeepCopy() *Quota {
+ if in == nil {
+ return nil
+ }
+ out := new(Quota)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReloaderSpec) DeepCopyInto(out *ReloaderSpec) {
*out = *in
@@ -1880,7 +2738,12 @@ func (in *RestoreList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) {
*out = *in
- out.To = in.To
+ in.To.DeepCopyInto(&out.To)
+ if in.TikvGCLifeTime != nil {
+ in, out := &in.TikvGCLifeTime, &out.TikvGCLifeTime
+ *out = new(string)
+ **out = **in
+ }
in.StorageProvider.DeepCopyInto(&out.StorageProvider)
if in.StorageClassName != nil {
in, out := &in.StorageClassName, &out.StorageClassName
@@ -1892,6 +2755,18 @@ func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) {
*out = new(BRConfig)
(*in).DeepCopyInto(*out)
}
+ if in.Tolerations != nil {
+ in, out := &in.Tolerations, &out.Tolerations
+ *out = make([]v1.Toleration, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Affinity != nil {
+ in, out := &in.Affinity, &out.Affinity
+ *out = new(v1.Affinity)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -1984,6 +2859,11 @@ func (in *Security) DeepCopyInto(out *Security) {
*out = new(string)
**out = **in
}
+ if in.ClusterVerifyCN != nil {
+ in, out := &in.ClusterVerifyCN, &out.ClusterVerifyCN
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
return
}
@@ -2054,11 +2934,6 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
- if in.ReportStatus != nil {
- in, out := &in.ReportStatus, &out.ReportStatus
- *out = new(bool)
- **out = **in
- }
if in.MetricsAddr != nil {
in, out := &in.MetricsAddr, &out.MetricsAddr
*out = new(string)
@@ -2069,6 +2944,11 @@ func (in *Status) DeepCopyInto(out *Status) {
*out = new(uint)
**out = **in
}
+ if in.ReportStatus != nil {
+ in, out := &in.ReportStatus, &out.ReportStatus
+ *out = new(bool)
+ **out = **in
+ }
if in.RecordQPSbyDB != nil {
in, out := &in.RecordQPSbyDB, &out.RecordQPSbyDB
*out = new(bool)
@@ -2090,25 +2970,62 @@ func (in *Status) DeepCopy() *Status {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StmtSummary) DeepCopyInto(out *StmtSummary) {
*out = *in
- if in.MaxStmtCount != nil {
- in, out := &in.MaxStmtCount, &out.MaxStmtCount
- *out = new(uint)
- **out = **in
- }
- if in.MaxSQLLength != nil {
- in, out := &in.MaxSQLLength, &out.MaxSQLLength
- *out = new(uint)
+ if in.Enable != nil {
+ in, out := &in.Enable, &out.Enable
+ *out = new(bool)
+ **out = **in
+ }
+ if in.MaxStmtCount != nil {
+ in, out := &in.MaxStmtCount, &out.MaxStmtCount
+ *out = new(uint)
+ **out = **in
+ }
+ if in.MaxSQLLength != nil {
+ in, out := &in.MaxSQLLength, &out.MaxSQLLength
+ *out = new(uint)
+ **out = **in
+ }
+ if in.RefreshInterval != nil {
+ in, out := &in.RefreshInterval, &out.RefreshInterval
+ *out = new(int)
+ **out = **in
+ }
+ if in.HistorySize != nil {
+ in, out := &in.HistorySize, &out.HistorySize
+ *out = new(int)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StmtSummary.
+func (in *StmtSummary) DeepCopy() *StmtSummary {
+ if in == nil {
+ return nil
+ }
+ out := new(StmtSummary)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageClaim) DeepCopyInto(out *StorageClaim) {
+ *out = *in
+ in.Resources.DeepCopyInto(&out.Resources)
+ if in.StorageClassName != nil {
+ in, out := &in.StorageClassName, &out.StorageClassName
+ *out = new(string)
**out = **in
}
return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StmtSummary.
-func (in *StmtSummary) DeepCopy() *StmtSummary {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClaim.
+func (in *StorageClaim) DeepCopy() *StorageClaim {
if in == nil {
return nil
}
- out := new(StmtSummary)
+ out := new(StorageClaim)
in.DeepCopyInto(out)
return out
}
@@ -2140,28 +3057,29 @@ func (in *StorageProvider) DeepCopy() *StorageProvider {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in StringSlice) DeepCopyInto(out *StringSlice) {
- {
- in := &in
- *out = make(StringSlice, len(*in))
- copy(*out, *in)
- return
- }
+func (in *TLSCluster) DeepCopyInto(out *TLSCluster) {
+ *out = *in
+ return
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSlice.
-func (in StringSlice) DeepCopy() StringSlice {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSCluster.
+func (in *TLSCluster) DeepCopy() *TLSCluster {
if in == nil {
return nil
}
- out := new(StringSlice)
+ out := new(TLSCluster)
in.DeepCopyInto(out)
- return *out
+ return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TiDBAccessConfig) DeepCopyInto(out *TiDBAccessConfig) {
*out = *in
+ if in.TLSClient != nil {
+ in, out := &in.TLSClient, &out.TLSClient
+ *out = new(TiDBTLSClient)
+ **out = **in
+ }
return
}
@@ -2323,6 +3241,51 @@ func (in *TiDBConfig) DeepCopyInto(out *TiDBConfig) {
*out = new(StmtSummary)
(*in).DeepCopyInto(*out)
}
+ if in.RepairMode != nil {
+ in, out := &in.RepairMode, &out.RepairMode
+ *out = new(bool)
+ **out = **in
+ }
+ if in.RepairTableList != nil {
+ in, out := &in.RepairTableList, &out.RepairTableList
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.IsolationRead != nil {
+ in, out := &in.IsolationRead, &out.IsolationRead
+ *out = new(IsolationRead)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MaxServerConnections != nil {
+ in, out := &in.MaxServerConnections, &out.MaxServerConnections
+ *out = new(uint32)
+ **out = **in
+ }
+ if in.NewCollationsEnabledOnFirstBootstrap != nil {
+ in, out := &in.NewCollationsEnabledOnFirstBootstrap, &out.NewCollationsEnabledOnFirstBootstrap
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Experimental != nil {
+ in, out := &in.Experimental, &out.Experimental
+ *out = new(Experimental)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.EnableDynamicConfig != nil {
+ in, out := &in.EnableDynamicConfig, &out.EnableDynamicConfig
+ *out = new(bool)
+ **out = **in
+ }
+ if in.EnableTableLock != nil {
+ in, out := &in.EnableTableLock, &out.EnableTableLock
+ *out = new(bool)
+ **out = **in
+ }
+ if in.DelayCleanTableLock != nil {
+ in, out := &in.DelayCleanTableLock, &out.DelayCleanTableLock
+ *out = new(uint64)
+ **out = **in
+ }
return
}
@@ -2449,9 +3412,9 @@ func (in *TiDBSpec) DeepCopyInto(out *TiDBSpec) {
*out = new(bool)
**out = **in
}
- if in.EnableTLSClient != nil {
- in, out := &in.EnableTLSClient, &out.EnableTLSClient
- *out = new(bool)
+ if in.TLSClient != nil {
+ in, out := &in.TLSClient, &out.TLSClient
+ *out = new(TiDBTLSClient)
**out = **in
}
if in.SlowLogTailer != nil {
@@ -2517,6 +3480,135 @@ func (in *TiDBStatus) DeepCopy() *TiDBStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TiDBTLSClient) DeepCopyInto(out *TiDBTLSClient) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiDBTLSClient.
+func (in *TiDBTLSClient) DeepCopy() *TiDBTLSClient {
+ if in == nil {
+ return nil
+ }
+ out := new(TiDBTLSClient)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TiFlashConfig) DeepCopyInto(out *TiFlashConfig) {
+ *out = *in
+ if in.CommonConfig != nil {
+ in, out := &in.CommonConfig, &out.CommonConfig
+ *out = new(CommonConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ProxyConfig != nil {
+ in, out := &in.ProxyConfig, &out.ProxyConfig
+ *out = new(ProxyConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashConfig.
+func (in *TiFlashConfig) DeepCopy() *TiFlashConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TiFlashConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TiFlashSpec) DeepCopyInto(out *TiFlashSpec) {
+ *out = *in
+ in.ComponentSpec.DeepCopyInto(&out.ComponentSpec)
+ in.ResourceRequirements.DeepCopyInto(&out.ResourceRequirements)
+ if in.Privileged != nil {
+ in, out := &in.Privileged, &out.Privileged
+ *out = new(bool)
+ **out = **in
+ }
+ if in.MaxFailoverCount != nil {
+ in, out := &in.MaxFailoverCount, &out.MaxFailoverCount
+ *out = new(int32)
+ **out = **in
+ }
+ if in.StorageClaims != nil {
+ in, out := &in.StorageClaims, &out.StorageClaims
+ *out = make([]StorageClaim, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(TiFlashConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LogTailer != nil {
+ in, out := &in.LogTailer, &out.LogTailer
+ *out = new(LogTailerSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashSpec.
+func (in *TiFlashSpec) DeepCopy() *TiFlashSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TiFlashSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TiFlashStatus) DeepCopyInto(out *TiFlashStatus) {
+ *out = *in
+ if in.StatefulSet != nil {
+ in, out := &in.StatefulSet, &out.StatefulSet
+ *out = new(appsv1.StatefulSetStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Stores != nil {
+ in, out := &in.Stores, &out.Stores
+ *out = make(map[string]TiKVStore, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.TombstoneStores != nil {
+ in, out := &in.TombstoneStores, &out.TombstoneStores
+ *out = make(map[string]TiKVStore, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.FailureStores != nil {
+ in, out := &in.FailureStores, &out.FailureStores
+ *out = make(map[string]TiKVFailureStore, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiFlashStatus.
+func (in *TiFlashStatus) DeepCopy() *TiFlashStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TiFlashStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TiKVBlockCacheConfig) DeepCopyInto(out *TiKVBlockCacheConfig) {
*out = *in
@@ -2747,6 +3839,11 @@ func (in *TiKVClient) DeepCopyInto(out *TiKVClient) {
*out = new(uint)
**out = **in
}
+ if in.CoprCache != nil {
+ in, out := &in.CoprCache, &out.CoprCache
+ *out = new(CoprocessorCache)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -2821,7 +3918,12 @@ func (in *TiKVConfig) DeepCopyInto(out *TiKVConfig) {
if in.Security != nil {
in, out := &in.Security, &out.Security
*out = new(TiKVSecurityConfig)
- **out = **in
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Encryption != nil {
+ in, out := &in.Encryption, &out.Encryption
+ *out = new(TiKVEncryptionConfig)
+ (*in).DeepCopyInto(*out)
}
return
}
@@ -3019,6 +4121,32 @@ func (in *TiKVDbConfig) DeepCopy() *TiKVDbConfig {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TiKVEncryptionConfig) DeepCopyInto(out *TiKVEncryptionConfig) {
+ *out = *in
+ if in.MasterKey != nil {
+ in, out := &in.MasterKey, &out.MasterKey
+ *out = new(TiKVMasterKeyConfig)
+ **out = **in
+ }
+ if in.PreviousMasterKey != nil {
+ in, out := &in.PreviousMasterKey, &out.PreviousMasterKey
+ *out = new(TiKVMasterKeyConfig)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVEncryptionConfig.
+func (in *TiKVEncryptionConfig) DeepCopy() *TiKVEncryptionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TiKVEncryptionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TiKVFailureStore) DeepCopyInto(out *TiKVFailureStore) {
*out = *in
@@ -3098,6 +4226,24 @@ func (in *TiKVImportConfig) DeepCopy() *TiKVImportConfig {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TiKVMasterKeyConfig) DeepCopyInto(out *TiKVMasterKeyConfig) {
+ *out = *in
+ out.MasterKeyFileConfig = in.MasterKeyFileConfig
+ out.MasterKeyKMSConfig = in.MasterKeyKMSConfig
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TiKVMasterKeyConfig.
+func (in *TiKVMasterKeyConfig) DeepCopy() *TiKVMasterKeyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TiKVMasterKeyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TiKVPDConfig) DeepCopyInto(out *TiKVPDConfig) {
*out = *in
@@ -3350,6 +4496,11 @@ func (in *TiKVReadPoolConfig) DeepCopy() *TiKVReadPoolConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TiKVSecurityConfig) DeepCopyInto(out *TiKVSecurityConfig) {
*out = *in
+ if in.CertAllowedCN != nil {
+ in, out := &in.CertAllowedCN, &out.CertAllowedCN
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
return
}
@@ -3710,6 +4861,23 @@ func (in *TidbAutoScalerSpec) DeepCopy() *TidbAutoScalerSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TidbAutoScalerStatus) DeepCopyInto(out *TidbAutoScalerStatus) {
+ *out = *in
+ in.BasicAutoScalerStatus.DeepCopyInto(&out.BasicAutoScalerStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TidbAutoScalerStatus.
+func (in *TidbAutoScalerStatus) DeepCopy() *TidbAutoScalerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TidbAutoScalerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TidbCluster) DeepCopyInto(out *TidbCluster) {
*out = *in
@@ -3744,7 +4912,7 @@ func (in *TidbClusterAutoScaler) DeepCopyInto(out *TidbClusterAutoScaler) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
+ in.Status.DeepCopyInto(&out.Status)
return
}
@@ -3808,6 +4976,11 @@ func (in *TidbClusterAutoScalerSpec) DeepCopyInto(out *TidbClusterAutoScalerSpec
*out = new(string)
**out = **in
}
+ if in.Monitor != nil {
+ in, out := &in.Monitor, &out.Monitor
+ *out = new(TidbMonitorRef)
+ **out = **in
+ }
if in.TiKV != nil {
in, out := &in.TiKV, &out.TiKV
*out = new(TikvAutoScalerSpec)
@@ -3834,6 +5007,16 @@ func (in *TidbClusterAutoScalerSpec) DeepCopy() *TidbClusterAutoScalerSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TidbClusterAutoSclaerStatus) DeepCopyInto(out *TidbClusterAutoSclaerStatus) {
*out = *in
+ if in.TiKV != nil {
+ in, out := &in.TiKV, &out.TiKV
+ *out = new(TikvAutoScalerStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TiDB != nil {
+ in, out := &in.TiDB, &out.TiDB
+ *out = new(TidbAutoScalerStatus)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -3902,6 +5085,11 @@ func (in *TidbClusterSpec) DeepCopyInto(out *TidbClusterSpec) {
in.PD.DeepCopyInto(&out.PD)
in.TiDB.DeepCopyInto(&out.TiDB)
in.TiKV.DeepCopyInto(&out.TiKV)
+ if in.TiFlash != nil {
+ in, out := &in.TiFlash, &out.TiFlash
+ *out = new(TiFlashSpec)
+ (*in).DeepCopyInto(*out)
+ }
if in.Pump != nil {
in, out := &in.Pump, &out.Pump
*out = new(PumpSpec)
@@ -3917,9 +5105,9 @@ func (in *TidbClusterSpec) DeepCopyInto(out *TidbClusterSpec) {
*out = new(bool)
**out = **in
}
- if in.EnableTLSCluster != nil {
- in, out := &in.EnableTLSCluster, &out.EnableTLSCluster
- *out = new(bool)
+ if in.TLSCluster != nil {
+ in, out := &in.TLSCluster, &out.TLSCluster
+ *out = new(TLSCluster)
**out = **in
}
if in.HostNetwork != nil {
@@ -3983,6 +5171,7 @@ func (in *TidbClusterStatus) DeepCopyInto(out *TidbClusterStatus) {
in.TiKV.DeepCopyInto(&out.TiKV)
in.TiDB.DeepCopyInto(&out.TiDB)
in.Pump.DeepCopyInto(&out.Pump)
+ in.TiFlash.DeepCopyInto(&out.TiFlash)
return
}
@@ -4182,6 +5371,22 @@ func (in *TidbMonitorList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TidbMonitorRef) DeepCopyInto(out *TidbMonitorRef) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TidbMonitorRef.
+func (in *TidbMonitorRef) DeepCopy() *TidbMonitorRef {
+ if in == nil {
+ return nil
+ }
+ out := new(TidbMonitorRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TidbMonitorSpec) DeepCopyInto(out *TidbMonitorSpec) {
*out = *in
@@ -4280,6 +5485,23 @@ func (in *TikvAutoScalerSpec) DeepCopy() *TikvAutoScalerSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TikvAutoScalerStatus) DeepCopyInto(out *TikvAutoScalerStatus) {
+ *out = *in
+ in.BasicAutoScalerStatus.DeepCopyInto(&out.BasicAutoScalerStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TikvAutoScalerStatus.
+func (in *TikvAutoScalerStatus) DeepCopy() *TikvAutoScalerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TikvAutoScalerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TxnLocalLatches) DeepCopyInto(out *TxnLocalLatches) {
*out = *in
@@ -4322,3 +5544,24 @@ func (in *UnjoinedMember) DeepCopy() *UnjoinedMember {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *User) DeepCopyInto(out *User) {
+ *out = *in
+ if in.Networks != nil {
+ in, out := &in.Networks, &out.Networks
+ *out = new(Networks)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User.
+func (in *User) DeepCopy() *User {
+ if in == nil {
+ return nil
+ }
+ out := new(User)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/apiserver/cmd/start.go b/pkg/apiserver/cmd/start.go
index c373d84da9..b257c4dffa 100644
--- a/pkg/apiserver/cmd/start.go
+++ b/pkg/apiserver/cmd/start.go
@@ -123,7 +123,7 @@ func NewCommandStartServer(builders []*builders.APIGroupBuilder, stopCh <-chan s
klog.InitFlags(klogFlags)
flags.AddGoFlagSet(klogFlags)
- // Sync the glog and klog flags.
+ // Sync the klog flags with the standard Go flag set.
klogFlags.VisitAll(func(f *flag.Flag) {
goFlag := flag.CommandLine.Lookup(f.Name)
if goFlag != nil {
diff --git a/pkg/apiserver/storage/apiserver.go b/pkg/apiserver/storage/apiserver.go
index 5af65ed273..ed6169e364 100644
--- a/pkg/apiserver/storage/apiserver.go
+++ b/pkg/apiserver/storage/apiserver.go
@@ -21,7 +21,7 @@ import (
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/storage/storagebackend/factory"
"k8s.io/client-go/rest"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/storage"
@@ -59,7 +59,7 @@ func (f *ApiServerRestOptionsFactory) newApiServerStorageDecorator() generic.Sto
) (storage.Interface, factory.DestroyFunc, error) {
cli, err := versioned.NewForConfig(f.RestConfig)
if err != nil {
- glog.Fatalf("failed to create Clientset: %v", err)
+ klog.Fatalf("failed to create Clientset: %v", err)
}
objectType := newFunc()
return NewApiServerStore(cli, f.Codec, f.StorageNamespace, objectType, newListFunc)
diff --git a/pkg/autoscaler/autoscaler/autoscaler_manager.go b/pkg/autoscaler/autoscaler/autoscaler_manager.go
index 79cca7491c..6fffa03eb3 100644
--- a/pkg/autoscaler/autoscaler/autoscaler_manager.go
+++ b/pkg/autoscaler/autoscaler/autoscaler_manager.go
@@ -15,30 +15,47 @@ package autoscaler
import (
"fmt"
+ "strconv"
+ "strings"
+ "time"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
- promClient "github.com/prometheus/client_golang/api"
+ "github.com/pingcap/tidb-operator/pkg/controller"
+ "github.com/pingcap/tidb-operator/pkg/label"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
kubeinformers "k8s.io/client-go/informers"
appslisters "k8s.io/client-go/listers/apps/v1"
"k8s.io/client-go/tools/record"
+ "k8s.io/client-go/util/retry"
"k8s.io/klog"
)
type autoScalerManager struct {
- tcLister v1alpha1listers.TidbClusterLister
+ cli versioned.Interface
+ tcControl controller.TidbClusterControlInterface
+ taLister v1alpha1listers.TidbClusterAutoScalerLister
stsLister appslisters.StatefulSetLister
recorder record.EventRecorder
}
func NewAutoScalerManager(
+ cli versioned.Interface,
informerFactory informers.SharedInformerFactory,
kubeInformerFactory kubeinformers.SharedInformerFactory,
recorder record.EventRecorder) *autoScalerManager {
+ tcLister := informerFactory.Pingcap().V1alpha1().TidbClusters().Lister()
+ stsLister := kubeInformerFactory.Apps().V1().StatefulSets().Lister()
return &autoScalerManager{
- tcLister: informerFactory.Pingcap().V1alpha1().TidbClusters().Lister(),
- stsLister: kubeInformerFactory.Apps().V1().StatefulSets().Lister(),
+ cli: cli,
+ tcControl: controller.NewRealTidbClusterControl(cli, tcLister, recorder),
+ taLister: informerFactory.Pingcap().V1alpha1().TidbClusterAutoScalers().Lister(),
+ stsLister: stsLister,
recorder: recorder,
}
}
@@ -54,47 +71,131 @@ func (am *autoScalerManager) Sync(tac *v1alpha1.TidbClusterAutoScaler) error {
tac.Spec.Cluster.Namespace = tac.Namespace
}
- tcNamespace := tac.Spec.Cluster.Namespace
- tc, err := am.tcLister.TidbClusters(tcNamespace).Get(tcName)
+ tc, err := am.cli.PingcapV1alpha1().TidbClusters(tac.Spec.Cluster.Namespace).Get(tcName, metav1.GetOptions{})
if err != nil {
+ if errors.IsNotFound(err) {
+ // The target TidbCluster ref has been deleted; reset the auto-scaling annotations and stop.
+ resetAutoScalingAnn(tac)
+ return nil
+ }
return err
}
- oldTCSpec := tc.Spec.DeepCopy()
+ checkAndUpdateTacAnn(tac)
+ oldTc := tc.DeepCopy()
if err := am.syncAutoScaling(tc, tac); err != nil {
return err
}
- if err := am.syncTidbClusterReplicas(tc, oldTCSpec); err != nil {
+ if err := am.syncTidbClusterReplicas(tac, tc, oldTc); err != nil {
return err
}
- return am.syncAutoScalingStatus(tc, oldTCSpec, tac)
+ return am.updateAutoScaling(oldTc, tac)
}
func (am *autoScalerManager) syncAutoScaling(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler) error {
- if tac.Spec.MetricsUrl == nil {
- return fmt.Errorf("tidbclusterAutoScaler[%s/%s]' metrics url should be defined explicitly", tac.Namespace, tac.Name)
- }
- client, err := promClient.NewClient(promClient.Config{Address: *tac.Spec.MetricsUrl})
- if err != nil {
- return err
- }
defaultTAC(tac)
- if err := am.syncTiKV(tc, tac, client); err != nil {
- return err
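+ // On TiKV sync failure, restore the previous replica count and continue with TiDB instead of aborting.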
+ oldTikvReplicas := tc.Spec.TiKV.Replicas
+ if err := am.syncTiKV(tc, tac); err != nil {
+ tc.Spec.TiKV.Replicas = oldTikvReplicas
+ klog.Errorf("tac[%s/%s] tikv sync failed, continue to sync next, err:%v", tac.Namespace, tac.Name, err)
}
- if err := am.syncTiDB(tc, tac, client); err != nil {
- return err
+ oldTidbReplicas := tc.Spec.TiDB.Replicas
+ if err := am.syncTiDB(tc, tac); err != nil {
+ tc.Spec.TiDB.Replicas = oldTidbReplicas
+ klog.Errorf("tac[%s/%s] tidb sync failed, continue to sync next, err:%v", tac.Namespace, tac.Name, err)
}
klog.Infof("tc[%s/%s]'s tac[%s/%s] synced", tc.Namespace, tc.Name, tac.Namespace, tac.Name)
return nil
}
-//TODO: sync TidbCluster.Spec.Replicas
-func (am *autoScalerManager) syncTidbClusterReplicas(tc *v1alpha1.TidbCluster, oldTCSpec *v1alpha1.TidbClusterSpec) error {
+func (am *autoScalerManager) syncTidbClusterReplicas(tac *v1alpha1.TidbClusterAutoScaler, tc *v1alpha1.TidbCluster, oldTc *v1alpha1.TidbCluster) error {
+ if tc.Spec.TiDB.Replicas == oldTc.Spec.TiDB.Replicas && tc.Spec.TiKV.Replicas == oldTc.Spec.TiKV.Replicas {
+ return nil
+ }
+ newTc := tc.DeepCopy()
+ _, err := am.tcControl.UpdateTidbCluster(newTc, &newTc.Status, &oldTc.Status)
+ if err != nil {
+ return err
+ }
+ reason := fmt.Sprintf("Successful %s", strings.Title("auto-scaling"))
+ msg := ""
+ if tc.Spec.TiDB.Replicas != oldTc.Spec.TiDB.Replicas {
+ msg = fmt.Sprintf("%s auto-scaling tidb from %d to %d", msg, oldTc.Spec.TiDB.Replicas, tc.Spec.TiDB.Replicas)
+ }
+ if tc.Spec.TiKV.Replicas != oldTc.Spec.TiKV.Replicas {
+ msg = fmt.Sprintf("%s auto-scaling tikv from %d to %d", msg, oldTc.Spec.TiKV.Replicas, tc.Spec.TiKV.Replicas)
+ }
+ am.recorder.Event(tac, corev1.EventTypeNormal, reason, msg)
return nil
}
-//TODO: sync tac status
-func (am *autoScalerManager) syncAutoScalingStatus(tc *v1alpha1.TidbCluster, oldTCSpec *v1alpha1.TidbClusterSpec,
+func (am *autoScalerManager) updateAutoScaling(oldTc *v1alpha1.TidbCluster,
tac *v1alpha1.TidbClusterAutoScaler) error {
- return nil
+ if tac.Annotations == nil {
+ tac.Annotations = map[string]string{}
+ }
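+ // f reads the last auto-scaling timestamp (stored as Unix seconds) from the given annotation key;
+ // it returns nil when the annotation is not set.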
+ f := func(key string) (*time.Time, error) {
+ v, ok := tac.Annotations[key]
+ if ok {
+ ts, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ klog.Errorf("failed to convert label[%s] key to int64, err:%v", key, err)
+ return nil, err
+ }
+ t := time.Unix(ts, 0)
+ return &t, nil
+ }
+ return nil, nil
+ }
+
+ if tac.Spec.TiKV != nil {
+ tac.Status.TiKV.CurrentReplicas = oldTc.Status.TiKV.StatefulSet.CurrentReplicas
+ lastTimestamp, err := f(label.AnnTiKVLastAutoScalingTimestamp)
+ if err != nil {
+ return err
+ }
+ if lastTimestamp != nil {
+ tac.Status.TiKV.LastAutoScalingTimestamp = &metav1.Time{Time: *lastTimestamp}
+ }
+ } else {
+ tac.Status.TiKV = nil
+ }
+ if tac.Spec.TiDB != nil {
+ tac.Status.TiDB.CurrentReplicas = oldTc.Status.TiDB.StatefulSet.CurrentReplicas
+ lastTimestamp, err := f(label.AnnTiDBLastAutoScalingTimestamp)
+ if err != nil {
+ return err
+ }
+ if lastTimestamp != nil {
+ tac.Status.TiDB.LastAutoScalingTimestamp = &metav1.Time{Time: *lastTimestamp}
+ }
+ } else {
+ tac.Status.TiDB = nil
+ }
+ return am.updateTidbClusterAutoScaler(tac)
+}
+
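+// updateTidbClusterAutoScaler writes the TidbClusterAutoScaler back to the API server, retrying on
+// conflicts with a fresh copy from the lister while keeping the locally computed annotations.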
+func (am *autoScalerManager) updateTidbClusterAutoScaler(tac *v1alpha1.TidbClusterAutoScaler) error {
+
+ ns := tac.GetNamespace()
+ tacName := tac.GetName()
+ oldTac := tac.DeepCopy()
+
+ // don't wait due to limited number of clients, but backoff after the default number of steps
+ return retry.RetryOnConflict(retry.DefaultRetry, func() error {
+ var updateErr error
+ _, updateErr = am.cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Update(tac)
+ if updateErr == nil {
+ klog.Infof("TidbClusterAutoScaler: [%s/%s] updated successfully", ns, tacName)
+ return nil
+ }
+ klog.Errorf("failed to update TidbClusterAutoScaler: [%s/%s], error: %v", ns, tacName, updateErr)
+ if updated, err := am.taLister.TidbClusterAutoScalers(ns).Get(tacName); err == nil {
+ // make a copy so we don't mutate the shared cache
+ tac = updated.DeepCopy()
+ tac.Annotations = oldTac.Annotations
+ } else {
+ utilruntime.HandleError(fmt.Errorf("error getting updated TidbClusterAutoScaler %s/%s from lister: %v", ns, tacName, err))
+ }
+ return updateErr
+ })
}
diff --git a/pkg/autoscaler/autoscaler/calculate/calculate.go b/pkg/autoscaler/autoscaler/calculate/calculate.go
new file mode 100644
index 0000000000..81f31a000b
--- /dev/null
+++ b/pkg/autoscaler/autoscaler/calculate/calculate.go
@@ -0,0 +1,116 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package calculate
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ promClient "github.com/prometheus/client_golang/api"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
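+// TikvSumCpuMetricsPattern and TidbSumCpuMetricsPattern sum the CPU seconds consumed per instance
+// over the given window.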
+const (
+ TikvSumCpuMetricsPattern = `sum(increase(tikv_thread_cpu_seconds_total{cluster="%s"}[%s])) by (instance)`
+ TidbSumCpuMetricsPattern = `sum(increase(process_cpu_seconds_total{cluster="%s",job="tidb"}[%s])) by (instance)`
+ InvalidTacMetricConfigureMsg = "tac[%s/%s] metric configuration invalid"
+ queryPath = "/api/v1/query"
+
+ float64EqualityThreshold = 1e-9
+ httpRequestTimeout = 5
+)
+
+type SingleQuery struct {
+ Endpoint string
+ Timestamp int64
+ Quary string
+ Instances []string
+ Metric autoscalingv2beta2.MetricSpec
+}
+
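+// queryMetricsFromPrometheus runs an instant query against the Prometheus HTTP API
+// (GET <endpoint>/api/v1/query?query=...&time=...) and decodes the JSON response into resp.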
+func queryMetricsFromPrometheus(tac *v1alpha1.TidbClusterAutoScaler, client promClient.Client, sq *SingleQuery, resp *Response) error {
+ query := sq.Quary
+ timestamp := sq.Timestamp
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*httpRequestTimeout)
+ defer cancel()
+ req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s%s", sq.Endpoint, queryPath), nil)
+ if err != nil {
+ return err
+ }
+ q := req.URL.Query()
+ q.Add("query", query)
+ q.Add("time", fmt.Sprintf("%d", timestamp))
+ req.URL.RawQuery = q.Encode()
+ r, body, err := client.Do(req.Context(), req)
+ if err != nil {
+ return err
+ }
+ if r.StatusCode != http.StatusOK {
+ return fmt.Errorf("tac[%s/%s] query error, status code:%d", tac.Namespace, tac.Name, r.StatusCode)
+ }
+ err = json.Unmarshal(body, resp)
+ if err != nil {
+ return err
+ }
+ if resp.Status != statusSuccess {
+ return fmt.Errorf("tac[%s/%s] query error, response status: %v", tac.Namespace, tac.Name, resp.Status)
+ }
+ return nil
+}
+
+// sumForEachInstance sums the values in the Prometheus Response across the given instances
+func sumForEachInstance(instances []string, resp *Response) (float64, error) {
+ if resp == nil {
+ return 0, fmt.Errorf("metrics response from Promethus can't be empty")
+ }
+ s := sets.String{}
+ for _, instance := range instances {
+ s.Insert(instance)
+ }
+ sum := 0.0
+ if len(resp.Data.Result) < 1 {
+ return 0, fmt.Errorf("metrics Response return zero info")
+ }
+ for _, r := range resp.Data.Result {
+ if s.Has(r.Metric.Instance) {
+ v, err := strconv.ParseFloat(r.Value[1].(string), 64)
+ if err != nil {
+ return 0.0, err
+ }
+ sum = sum + v
+ }
+ }
+ return sum, nil
+}
+
+// calculate returns the recommended replicas derived from the usage ratio (currentValue/targetValue) and currentReplicas
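+// For example, currentValue=35, targetValue=30, currentReplicas=4 gives ceil(35/30*4) = 5.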
+func calculate(currentValue float64, targetValue float64, currentReplicas int32) (int32, error) {
+ if almostEqual(targetValue, 0.0) {
+ return -1, fmt.Errorf("targetValue in calculate func can't be zero")
+ }
+ usageRatio := currentValue / targetValue
+ return int32(math.Ceil(usageRatio * float64(currentReplicas))), nil
+}
+
+func almostEqual(a, b float64) bool {
+ return math.Abs(a-b) <= float64EqualityThreshold
+}
diff --git a/pkg/autoscaler/autoscaler/calculate/calculate_test.go b/pkg/autoscaler/autoscaler/calculate/calculate_test.go
new file mode 100644
index 0000000000..899672fee3
--- /dev/null
+++ b/pkg/autoscaler/autoscaler/calculate/calculate_test.go
@@ -0,0 +1,74 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package calculate
+
+import (
+ . "github.com/onsi/gomega"
+ "testing"
+)
+
+func TestCalculate(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tests := []struct {
+ name string
+ currentReplicas int32
+ currentValue float64
+ targetValue float64
+ expectedReplicas int32
+ errMsg string
+ }{
+ {
+ name: "under target value",
+ currentReplicas: 4,
+ currentValue: 20.0,
+ targetValue: 30.0,
+ expectedReplicas: 3,
+ },
+ {
+ name: "equal target value",
+ currentReplicas: 4,
+ currentValue: 30.0,
+ targetValue: 30.0,
+ expectedReplicas: 4,
+ },
+ {
+ name: "greater than target value",
+ currentReplicas: 4,
+ currentValue: 35.0,
+ targetValue: 30.0,
+ expectedReplicas: 5,
+ },
+ {
+ name: "target value is zero",
+ currentReplicas: 4,
+ currentValue: 35.0,
+ targetValue: 0,
+ expectedReplicas: -1,
+ errMsg: "targetValue in calculate func can't be zero",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ r, err := calculate(tt.currentValue, tt.targetValue, tt.currentReplicas)
+ if len(tt.errMsg) < 1 {
+ g.Expect(err).Should(BeNil())
+ } else {
+ g.Expect(err).ShouldNot(BeNil())
+ g.Expect(err.Error()).Should(Equal(tt.errMsg))
+ }
+ g.Expect(r).Should(Equal(tt.expectedReplicas))
+ })
+ }
+}
diff --git a/pkg/autoscaler/autoscaler/calculate/cpu.go b/pkg/autoscaler/autoscaler/calculate/cpu.go
new file mode 100644
index 0000000000..2d803fe205
--- /dev/null
+++ b/pkg/autoscaler/autoscaler/calculate/cpu.go
@@ -0,0 +1,86 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package calculate
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ promClient "github.com/prometheus/client_golang/api"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+)
+
+const (
+ CpuSumMetricsErrorMsg = "tac[%s/%s] cpu sum metrics error, can't calculate the past %s cpu metrics, may be caused by prometheus restart while data persistence not enabled"
+)
+
+// TODO: create an issue to explain how the auto-scaling algorithm based on cpu metrics works
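+// The recommendation is based on CPU seconds: the pods' total CPU usage over the metrics
+// window (cpuSecsTotal) is compared with the expected total
+// (cpuRequests * windowSeconds * currentReplicas * targetUtilization), and the replica
+// count is scaled by that ratio via calculate().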
+func CalculateRecomendedReplicasByCpuCosts(tac *v1alpha1.TidbClusterAutoScaler, sq *SingleQuery, sts *appsv1.StatefulSet,
+ client promClient.Client, memberType v1alpha1.MemberType, duration time.Duration) (int32, error) {
+ metric := sq.Metric
+ instances := sq.Instances
+
+ if metric.Resource == nil || metric.Resource.Target.AverageUtilization == nil {
+ return -1, fmt.Errorf(InvalidTacMetricConfigureMsg, tac.Namespace, tac.Name)
+ }
+ currentReplicas := len(instances)
+ c, err := filterContainer(tac, sts, memberType.String())
+ if err != nil {
+ return -1, err
+ }
+ cpuRequestsRatio, err := extractCpuRequestsRatio(c)
+ if err != nil {
+ return -1, err
+ }
+ r := &Response{}
+ err = queryMetricsFromPrometheus(tac, client, sq, r)
+ if err != nil {
+ return -1, err
+ }
+ sum, err := sumForEachInstance(instances, r)
+ if err != nil {
+ return -1, err
+ }
+ if sum < 0 {
+ return -1, fmt.Errorf(CpuSumMetricsErrorMsg, tac.Namespace, tac.Name, duration.String())
+ }
+ cpuSecsTotal := sum
+ durationSeconds := duration.Seconds()
+ utilizationRatio := float64(*metric.Resource.Target.AverageUtilization) / 100.0
+ expectedCpuSecsTotal := cpuRequestsRatio * durationSeconds * float64(currentReplicas) * utilizationRatio
+ rc, err := calculate(cpuSecsTotal, expectedCpuSecsTotal, int32(currentReplicas))
+ if err != nil {
+ return -1, err
+ }
+ metrics := v1alpha1.MetricsStatus{
+ Name: string(MetricTypeCPU),
+ CurrentValue: fmt.Sprintf("%v", cpuSecsTotal),
+ ThresholdValue: fmt.Sprintf("%v", expectedCpuSecsTotal),
+ }
+ if memberType == v1alpha1.TiKVMemberType {
+ addMetricsStatusIntoMetricsStatusList(metrics, &tac.Status.TiKV.BasicAutoScalerStatus)
+ } else if memberType == v1alpha1.TiDBMemberType {
+ addMetricsStatusIntoMetricsStatusList(metrics, &tac.Status.TiDB.BasicAutoScalerStatus)
+ }
+ return rc, nil
+}
+
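+// extractCpuRequestsRatio converts the container's CPU request into cores, e.g. a request of "1500m" yields 1.5.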
+func extractCpuRequestsRatio(c *corev1.Container) (float64, error) {
+ if c.Resources.Requests.Cpu() == nil || c.Resources.Requests.Cpu().MilliValue() < 1 {
+ return 0, fmt.Errorf("container[%s] cpu requests is empty", c.Name)
+ }
+ return float64(c.Resources.Requests.Cpu().MilliValue()) / 1000.0, nil
+}
diff --git a/pkg/autoscaler/autoscaler/calculate/cpu_test.go b/pkg/autoscaler/autoscaler/calculate/cpu_test.go
new file mode 100644
index 0000000000..712b1bc281
--- /dev/null
+++ b/pkg/autoscaler/autoscaler/calculate/cpu_test.go
@@ -0,0 +1,101 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package calculate
+
+import (
+ "fmt"
+ "testing"
+
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+)
+
+func TestExtractCpuRequestsRatio(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tests := []struct {
+ name string
+ defineRequest bool
+ cpuValue string
+ expectedRatio float64
+ occurError bool
+ errMsg string
+ }{
+ {
+ name: "cpu 1",
+ defineRequest: true,
+ cpuValue: "1",
+ occurError: false,
+ expectedRatio: 1.0,
+ errMsg: "",
+ },
+ {
+ name: "cpu 1000m",
+ defineRequest: true,
+ cpuValue: "1000m",
+ occurError: false,
+ expectedRatio: 1.0,
+ errMsg: "",
+ },
+ {
+ name: "cpu 1500m",
+ defineRequest: true,
+ cpuValue: "1500m",
+ occurError: false,
+ expectedRatio: 1.5,
+ errMsg: "",
+ },
+ {
+ name: "no cpu request",
+ defineRequest: false,
+ cpuValue: "",
+ occurError: true,
+ expectedRatio: 0,
+ errMsg: fmt.Sprintf("container[%s] cpu requests is empty", "container"),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := newContainer()
+ if tt.defineRequest {
+ c.Resources.Requests = map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse(tt.cpuValue),
+ }
+ } else {
+ c.Resources.Requests = map[corev1.ResourceName]resource.Quantity{}
+ }
+ r, err := extractCpuRequestsRatio(c)
+ if !tt.occurError {
+ g.Expect(err).Should(BeNil())
+ } else {
+ g.Expect(err).ShouldNot(BeNil())
+ g.Expect(err.Error()).Should(Equal(tt.errMsg))
+ }
+ g.Expect(almostEqual(r, tt.expectedRatio)).Should(Equal(true))
+ })
+ }
+}
+
+func newContainer() *corev1.Container {
+ return &corev1.Container{
+ Name: "container",
+ Image: "fake:fake",
+ Resources: corev1.ResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ },
+ },
+ }
+}
diff --git a/pkg/autoscaler/autoscaler/calculate/util.go b/pkg/autoscaler/autoscaler/calculate/util.go
new file mode 100644
index 0000000000..14ded46ec9
--- /dev/null
+++ b/pkg/autoscaler/autoscaler/calculate/util.go
@@ -0,0 +1,103 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package calculate
+
+import (
+ "fmt"
+
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ appsv1 "k8s.io/api/apps/v1"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// MetricType describes the metric types currently supported for calculating the recommended replicas
+type MetricType string
+
+const (
+ MetricTypeCPU MetricType = "cpu"
+ //metricTypeQPS MetricType = "qps"
+)
+
+// Currently we only choose one metric to compute.
+// If several metrics exist, we prefer the ResourceMetricSourceType metric.
+func FilterMetrics(metrics []autoscalingv2beta2.MetricSpec) autoscalingv2beta2.MetricSpec {
+ for _, m := range metrics {
+ if m.Type == autoscalingv2beta2.ResourceMetricSourceType && m.Resource != nil {
+ return m
+ }
+ }
+ return metrics[0]
+}
+
+// GenMetricType returns the MetricType supported by the Operator for the given Kubernetes auto-scaling metric spec
+func GenMetricType(tac *v1alpha1.TidbClusterAutoScaler, metric autoscalingv2beta2.MetricSpec) (MetricType, error) {
+ if metric.Type == autoscalingv2beta2.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == corev1.ResourceCPU {
+ return MetricTypeCPU, nil
+ }
+ return "", fmt.Errorf(InvalidTacMetricConfigureMsg, tac.Namespace, tac.Name)
+}
+
+// filterContainer picks the container with the given name (tidb/tikv) from the given StatefulSet
+func filterContainer(tac *v1alpha1.TidbClusterAutoScaler, sts *appsv1.StatefulSet, containerName string) (*corev1.Container, error) {
+ for _, c := range sts.Spec.Template.Spec.Containers {
+ if c.Name == containerName {
+ return &c, nil
+ }
+ }
+ return nil, fmt.Errorf("tac[%s/%s]'s Target have not %s container", tac.Namespace, tac.Name, containerName)
+}
+
+func addMetricsStatusIntoMetricsStatusList(metrics v1alpha1.MetricsStatus, basicStatus *v1alpha1.BasicAutoScalerStatus) {
+ if basicStatus.MetricsStatusList == nil {
+ basicStatus.MetricsStatusList = []v1alpha1.MetricsStatus{}
+ }
+ for id, m := range basicStatus.MetricsStatusList {
+ if m.Name == metrics.Name {
+ basicStatus.MetricsStatusList[id] = metrics
+ return
+ }
+ }
+ basicStatus.MetricsStatusList = append(basicStatus.MetricsStatusList, metrics)
+ return
+}
+
+const (
+ statusSuccess = "success"
+)
+
+// Response is used to marshal the data queried from Prometheus
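+// A typical (abridged) instant-query response looks like:
+//   {"status":"success","data":{"resultType":"vector","result":[
+//     {"metric":{"instance":"basic-tidb-0"},"value":[1585113857.112,"0.215"]}]}}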
+type Response struct {
+ Status string `json:"status"`
+ Data Data `json:"data"`
+}
+
+type Data struct {
+ ResultType string `json:"resultType"`
+ Result []Result `json:"result"`
+}
+
+type Result struct {
+ Metric Metric `json:"metric"`
+ Value []interface{} `json:"value"`
+}
+
+type Metric struct {
+ Cluster string `json:"cluster,omitempty"`
+ Instance string `json:"instance"`
+ Job string `json:"job,omitempty"`
+ KubernetesNamespace string `json:"kubernetes_namespace,omitempty"`
+ KubernetesNode string `json:"kubernetes_node,omitempty"`
+ KubernetesPodIp string `json:"kubernetes_pod_ip,omitempty"`
+}
diff --git a/pkg/autoscaler/autoscaler/tidb_autoscaler.go b/pkg/autoscaler/autoscaler/tidb_autoscaler.go
index bd943e3dfa..9b130ec8ce 100644
--- a/pkg/autoscaler/autoscaler/tidb_autoscaler.go
+++ b/pkg/autoscaler/autoscaler/tidb_autoscaler.go
@@ -14,18 +14,24 @@
package autoscaler
import (
+ "fmt"
"time"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/autoscaler/autoscaler/calculate"
"github.com/pingcap/tidb-operator/pkg/label"
operatorUtils "github.com/pingcap/tidb-operator/pkg/util"
promClient "github.com/prometheus/client_golang/api"
+ appsv1 "k8s.io/api/apps/v1"
)
-func (am *autoScalerManager) syncTiDB(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, client promClient.Client) error {
+func (am *autoScalerManager) syncTiDB(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler) error {
if tac.Spec.TiDB == nil {
return nil
}
+ if tac.Status.TiDB == nil {
+ tac.Status.TiDB = &v1alpha1.TidbAutoScalerStatus{}
+ }
sts, err := am.stsLister.StatefulSets(tc.Namespace).Get(operatorUtils.GetStatefulSetName(tc, v1alpha1.TiDBMemberType))
if err != nil {
return err
@@ -33,29 +39,86 @@ func (am *autoScalerManager) syncTiDB(tc *v1alpha1.TidbCluster, tac *v1alpha1.Ti
if !checkAutoScalingPrerequisites(tc, sts, v1alpha1.TiDBMemberType) {
return nil
}
- targetReplicas := tc.Spec.TiDB.Replicas
-
- // TODO: sync tidb.metrics from prometheus
- // rate(process_cpu_seconds_total{cluster="tidb",job="tidb"}[threshold Minute])
- //for _, _ = range tac.Spec.TiDB.Metrics {
- // // revive:disable:empty-block
- //}
+ currentReplicas := tc.Spec.TiDB.Replicas
+ instances := filterTidbInstances(tc)
+ targetReplicas, err := calculateTidbMetrics(tac, sts, instances)
+ if err != nil {
+ return err
+ }
targetReplicas = limitTargetReplicas(targetReplicas, tac, v1alpha1.TiDBMemberType)
if targetReplicas == tc.Spec.TiDB.Replicas {
return nil
}
+ return syncTiDBAfterCalculated(tc, tac, currentReplicas, targetReplicas, sts)
+}
+
+// syncTiDBAfterCalculated checks the consecutive count to avoid jitter, and also checks the interval
+// duration between two consecutive auto-scaling operations. If either condition is not met, the auto-scaling is rejected.
+// If the auto-scaling is permitted, the timestamp is recorded and the consecutive count is zeroed.
+func syncTiDBAfterCalculated(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, currentReplicas, recommendedReplicas int32, sts *appsv1.StatefulSet) error {
intervalSeconds := tac.Spec.TiDB.ScaleInIntervalSeconds
- if targetReplicas > tc.Spec.TiDB.Replicas {
+ if recommendedReplicas > currentReplicas {
intervalSeconds = tac.Spec.TiDB.ScaleOutIntervalSeconds
}
- ableToScale, err := checkStsAutoScalingInterval(tc, *intervalSeconds, v1alpha1.TiDBMemberType)
+ ableToScale, err := checkStsAutoScalingInterval(tac, *intervalSeconds, v1alpha1.TiDBMemberType)
if err != nil {
return err
}
if !ableToScale {
return nil
}
- tc.Spec.Annotations[label.AnnTiDBLastAutoScalingTimestamp] = time.Now().String()
- tc.Spec.TiDB.Replicas = targetReplicas
+ return updateTcTiDBIfScale(tc, tac, recommendedReplicas)
+}
+
+// Currently we don't record the auto-scaling-out slots for tidb, because it is pointless for now.
+func updateTcTiDBIfScale(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, recommendedReplicas int32) error {
+ tac.Annotations[label.AnnTiDBLastAutoScalingTimestamp] = fmt.Sprintf("%d", time.Now().Unix())
+ tc.Spec.TiDB.Replicas = recommendedReplicas
+ tac.Status.TiDB.RecommendedReplicas = &recommendedReplicas
return nil
}
+
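+// calculateTidbMetrics builds a Prometheus client and a single CPU query over the given
+// TiDB instances, then asks the calculate package for the recommended replica count.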
+func calculateTidbMetrics(tac *v1alpha1.TidbClusterAutoScaler, sts *appsv1.StatefulSet, instances []string) (int32, error) {
+ ep, err := genMetricsEndpoint(tac)
+ if err != nil {
+ return -1, err
+ }
+ client, err := promClient.NewClient(promClient.Config{Address: ep})
+ if err != nil {
+ return -1, err
+ }
+ metric := calculate.FilterMetrics(tac.Spec.TiDB.Metrics)
+ mType, err := calculate.GenMetricType(tac, metric)
+ if err != nil {
+ return -1, err
+ }
+ duration, err := time.ParseDuration(*tac.Spec.TiDB.MetricsTimeDuration)
+ if err != nil {
+ return -1, err
+ }
+ sq := &calculate.SingleQuery{
+ Endpoint: ep,
+ Timestamp: time.Now().Unix(),
+ Instances: instances,
+ Metric: metric,
+ Quary: fmt.Sprintf(calculate.TidbSumCpuMetricsPattern, tac.Spec.Cluster.Name, *tac.Spec.TiDB.MetricsTimeDuration),
+ }
+
+ switch mType {
+ case calculate.MetricTypeCPU:
+ return calculate.CalculateRecomendedReplicasByCpuCosts(tac, sq, sts, client, v1alpha1.TiDBMemberType, duration)
+ default:
+ return -1, fmt.Errorf(calculate.InvalidTacMetricConfigureMsg, tac.Namespace, tac.Name)
+ }
+}
+
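+// filterTidbInstances returns the pod names of all TiDB members that are not failure members.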
+func filterTidbInstances(tc *v1alpha1.TidbCluster) []string {
+ var instances []string
+ for i := 0; int32(i) < tc.Status.TiDB.StatefulSet.Replicas; i++ {
+ podName := operatorUtils.GetPodName(tc, v1alpha1.TiDBMemberType, int32(i))
+ if _, existed := tc.Status.TiDB.FailureMembers[podName]; !existed {
+ instances = append(instances, podName)
+ }
+ }
+ return instances
+}
diff --git a/pkg/autoscaler/autoscaler/tikv_autoscaler.go b/pkg/autoscaler/autoscaler/tikv_autoscaler.go
index 256b9a6a9c..b36c00e601 100644
--- a/pkg/autoscaler/autoscaler/tikv_autoscaler.go
+++ b/pkg/autoscaler/autoscaler/tikv_autoscaler.go
@@ -14,18 +14,25 @@
package autoscaler
import (
+ "fmt"
"time"
+ "github.com/pingcap/advanced-statefulset/pkg/apis/apps/v1/helper"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/autoscaler/autoscaler/calculate"
"github.com/pingcap/tidb-operator/pkg/label"
operatorUtils "github.com/pingcap/tidb-operator/pkg/util"
promClient "github.com/prometheus/client_golang/api"
+ appsv1 "k8s.io/api/apps/v1"
)
-func (am *autoScalerManager) syncTiKV(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, client promClient.Client) error {
+func (am *autoScalerManager) syncTiKV(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler) error {
if tac.Spec.TiKV == nil {
return nil
}
+ if tac.Status.TiKV == nil {
+ tac.Status.TiKV = &v1alpha1.TikvAutoScalerStatus{}
+ }
sts, err := am.stsLister.StatefulSets(tc.Namespace).Get(operatorUtils.GetStatefulSetName(tc, v1alpha1.TiKVMemberType))
if err != nil {
return err
@@ -33,29 +40,105 @@ func (am *autoScalerManager) syncTiKV(tc *v1alpha1.TidbCluster, tac *v1alpha1.Ti
if !checkAutoScalingPrerequisites(tc, sts, v1alpha1.TiKVMemberType) {
return nil
}
- targetReplicas := tc.Spec.TiKV.Replicas
-
- // TODO: sync tikv .metrics from prometheus
- // sum(rate(tikv_grpc_msg_duration_seconds_count{cluster="tidb", type!="kv_gc"}[1m])) by (instance)
- //for _, _ = range tac.Spec.TiKV.Metrics {
- // // revive:disable:empty-block
- //}
+ instances := filterTiKVInstances(tc)
+ currentReplicas := int32(len(instances))
+ targetReplicas, err := calculateTikvMetrics(tac, sts, instances)
+ if err != nil {
+ return err
+ }
targetReplicas = limitTargetReplicas(targetReplicas, tac, v1alpha1.TiKVMemberType)
if targetReplicas == tc.Spec.TiKV.Replicas {
return nil
}
+ return syncTiKVAfterCalculated(tc, tac, currentReplicas, targetReplicas, sts)
+}
+
+// syncTiKVAfterCalculated checks the consecutive count to avoid jitter, and also checks the interval
+// duration between two consecutive auto-scaling operations. If either condition is not met, the auto-scaling is rejected.
+// If the auto-scaling is permitted, the timestamp is recorded and the consecutive count is zeroed.
+// The currentReplicas calculated during auto-scaling is the count of TiKV instances in the StateUp state, so we need to
+// add the replicas of TiKV instances in other states when we update TidbCluster.Spec.TiKV.Replicas.
+func syncTiKVAfterCalculated(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, currentReplicas, recommendedReplicas int32, sts *appsv1.StatefulSet) error {
+
intervalSeconds := tac.Spec.TiKV.ScaleInIntervalSeconds
- if targetReplicas > tc.Spec.TiKV.Replicas {
+ if recommendedReplicas > tc.Spec.TiKV.Replicas {
intervalSeconds = tac.Spec.TiKV.ScaleOutIntervalSeconds
}
- ableToScale, err := checkStsAutoScalingInterval(tc, *intervalSeconds, v1alpha1.TiKVMemberType)
+ ableToScale, err := checkStsAutoScalingInterval(tac, *intervalSeconds, v1alpha1.TiKVMemberType)
if err != nil {
return err
}
if !ableToScale {
return nil
}
- tc.Spec.Annotations[label.AnnTiKVLastAutoScalingTimestamp] = time.Now().String()
- tc.Spec.TiKV.Replicas = targetReplicas
+ return updateTcTiKVIfScale(tc, tac, currentReplicas, recommendedReplicas, sts)
+}
+
+// TODO: fetch the tikv instance info from the PD API in the future
+func filterTiKVInstances(tc *v1alpha1.TidbCluster) []string {
+ var instances []string
+ for _, store := range tc.Status.TiKV.Stores {
+ if store.State == v1alpha1.TiKVStateUp {
+ instances = append(instances, store.PodName)
+ }
+ }
+ return instances
+}
+
+// We record the auto-scaling-out slots for tikv in order to add the special hot labels to the new stores when they are created.
+func updateTcTiKVIfScale(tc *v1alpha1.TidbCluster, tac *v1alpha1.TidbClusterAutoScaler, currentReplicas, recommendedReplicas int32, sts *appsv1.StatefulSet) error {
+ tac.Annotations[label.AnnTiKVLastAutoScalingTimestamp] = fmt.Sprintf("%d", time.Now().Unix())
+ if recommendedReplicas > currentReplicas {
+ newlyScaleOutOrdinalSets := helper.GetPodOrdinals(recommendedReplicas, sts).Difference(helper.GetPodOrdinals(currentReplicas, sts))
+ if newlyScaleOutOrdinalSets.Len() > 0 {
+ if tc.Annotations == nil {
+ tc.Annotations = map[string]string{}
+ }
+ existed := operatorUtils.GetAutoScalingOutSlots(tc, v1alpha1.TiKVMemberType)
+ v, err := operatorUtils.Encode(newlyScaleOutOrdinalSets.Union(existed).List())
+ if err != nil {
+ return err
+ }
+ tc.Annotations[label.AnnTiKVAutoScalingOutOrdinals] = v
+ }
+ }
+ tc.Spec.TiKV.Replicas = recommendedReplicas
+ tac.Status.TiKV.RecommendedReplicas = &recommendedReplicas
return nil
}
+
+func calculateTikvMetrics(tac *v1alpha1.TidbClusterAutoScaler, sts *appsv1.StatefulSet, instances []string) (int32, error) {
+ ep, err := genMetricsEndpoint(tac)
+ if err != nil {
+ return -1, err
+ }
+ client, err := promClient.NewClient(promClient.Config{Address: ep})
+ if err != nil {
+ return -1, err
+ }
+
+ metric := calculate.FilterMetrics(tac.Spec.TiKV.Metrics)
+ mType, err := calculate.GenMetricType(tac, metric)
+ if err != nil {
+ return -1, err
+ }
+
+ duration, err := time.ParseDuration(*tac.Spec.TiKV.MetricsTimeDuration)
+ if err != nil {
+ return -1, err
+ }
+ sq := &calculate.SingleQuery{
+ Endpoint: ep,
+ Timestamp: time.Now().Unix(),
+ Instances: instances,
+ Metric: metric,
+ Quary: fmt.Sprintf(calculate.TikvSumCpuMetricsPattern, tac.Spec.Cluster.Name, *tac.Spec.TiKV.MetricsTimeDuration),
+ }
+
+ switch mType {
+ case calculate.MetricTypeCPU:
+ return calculate.CalculateRecomendedReplicasByCpuCosts(tac, sq, sts, client, v1alpha1.TiKVMemberType, duration)
+ default:
+ return -1, fmt.Errorf(calculate.InvalidTacMetricConfigureMsg, tac.Namespace, tac.Name)
+ }
+}
diff --git a/pkg/autoscaler/autoscaler/util.go b/pkg/autoscaler/autoscaler/util.go
index 6ed74d8456..9f21156475 100644
--- a/pkg/autoscaler/autoscaler/util.go
+++ b/pkg/autoscaler/autoscaler/util.go
@@ -14,6 +14,7 @@
package autoscaler
import (
+ "fmt"
"strconv"
"time"
@@ -26,6 +27,14 @@ import (
"k8s.io/utils/pointer"
)
+const (
+ annScaleOutSuffix = "tidb.pingcap.com/consecutive-scale-out-count"
+ annScaleInSuffix = "tidb.pingcap.com/consecutive-scale-in-count"
+
+ invalidMemberTypeErrorMsg = "tac[%s/%s] invalid MemberType: %s"
+ invalidTacAnnotationErrorMsg = "tac[%s/%s] invalid annotation, err: %v"
+)
+
var defaultMetricSpec = autoscalingv2beta2.MetricSpec{
Type: autoscalingv2beta2.ResourceMetricSourceType,
Resource: &autoscalingv2beta2.ResourceMetricSource{
@@ -49,13 +58,13 @@ func checkStsAutoScalingPrerequisites(set *appsv1.StatefulSet) bool {
}
// checkStsAutoScalingInterval would check whether there is enough interval duration between every two auto-scaling
-func checkStsAutoScalingInterval(tc *v1alpha1.TidbCluster, intervalSeconds int32, memberType v1alpha1.MemberType) (bool, error) {
- if tc.Annotations == nil {
- tc.Annotations = map[string]string{}
+func checkStsAutoScalingInterval(tac *v1alpha1.TidbClusterAutoScaler, intervalSeconds int32, memberType v1alpha1.MemberType) (bool, error) {
+ if tac.Annotations == nil {
+ tac.Annotations = map[string]string{}
}
- lastAutoScalingTimestamp, existed := tc.Annotations[label.AnnTiDBLastAutoScalingTimestamp]
+ lastAutoScalingTimestamp, existed := tac.Annotations[label.AnnTiDBLastAutoScalingTimestamp]
if memberType == v1alpha1.TiKVMemberType {
- lastAutoScalingTimestamp, existed = tc.Annotations[label.AnnTiKVLastAutoScalingTimestamp]
+ lastAutoScalingTimestamp, existed = tac.Annotations[label.AnnTiKVLastAutoScalingTimestamp]
}
if !existed {
return true, nil
@@ -126,7 +135,23 @@ func defaultTAC(tac *v1alpha1.TidbClusterAutoScaler) {
if len(tac.Spec.TiKV.Metrics) == 0 {
tac.Spec.TiKV.Metrics = append(tac.Spec.TiKV.Metrics, defaultMetricSpec)
}
+ if tac.Spec.TiKV.ScaleInThreshold == nil {
+ tac.Spec.TiKV.ScaleInThreshold = pointer.Int32Ptr(5)
+ }
+ if tac.Spec.TiKV.ScaleOutThreshold == nil {
+ tac.Spec.TiKV.ScaleOutThreshold = pointer.Int32Ptr(3)
+ }
+ if tac.Spec.TiKV.MetricsTimeDuration == nil {
+ tac.Spec.TiKV.MetricsTimeDuration = pointer.StringPtr("3m")
+ }
+ if tac.Spec.TiKV.ScaleOutIntervalSeconds == nil {
+ tac.Spec.TiKV.ScaleOutIntervalSeconds = pointer.Int32Ptr(300)
+ }
+ if tac.Spec.TiKV.ScaleInIntervalSeconds == nil {
+ tac.Spec.TiKV.ScaleInIntervalSeconds = pointer.Int32Ptr(500)
+ }
}
+
if tac.Spec.TiDB != nil {
if tac.Spec.TiDB.MinReplicas == nil {
tac.Spec.TiDB.MinReplicas = pointer.Int32Ptr(1)
@@ -134,5 +159,59 @@ func defaultTAC(tac *v1alpha1.TidbClusterAutoScaler) {
if len(tac.Spec.TiDB.Metrics) == 0 {
tac.Spec.TiDB.Metrics = append(tac.Spec.TiDB.Metrics, defaultMetricSpec)
}
+ if tac.Spec.TiDB.ScaleInThreshold == nil {
+ tac.Spec.TiDB.ScaleInThreshold = pointer.Int32Ptr(5)
+ }
+ if tac.Spec.TiDB.ScaleOutThreshold == nil {
+ tac.Spec.TiDB.ScaleOutThreshold = pointer.Int32Ptr(3)
+ }
+ if tac.Spec.TiDB.MetricsTimeDuration == nil {
+ tac.Spec.TiDB.MetricsTimeDuration = pointer.StringPtr("3m")
+ }
+ if tac.Spec.TiDB.ScaleOutIntervalSeconds == nil {
+ tac.Spec.TiDB.ScaleOutIntervalSeconds = pointer.Int32Ptr(300)
+ }
+ if tac.Spec.TiDB.ScaleInIntervalSeconds == nil {
+ tac.Spec.TiDB.ScaleInIntervalSeconds = pointer.Int32Ptr(500)
+ }
+ }
+
+ if tac.Spec.Monitor != nil {
+ if len(tac.Spec.Monitor.Namespace) < 1 {
+ tac.Spec.Monitor.Namespace = tac.Namespace
+ }
+ }
+}
+
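+// resetAutoScalingAnn records the current target TidbCluster (namespace and name) in the tac annotations.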
+func resetAutoScalingAnn(tac *v1alpha1.TidbClusterAutoScaler) {
+ tac.Annotations[label.AnnAutoScalingTargetNamespace] = tac.Spec.Cluster.Namespace
+ tac.Annotations[label.AnnAutoScalingTargetName] = tac.Spec.Cluster.Name
+}
+
+// checkAndUpdateTacAnn compares the target TidbCluster ref stored in the annotations
+// with the one in the Spec. If they are not equal, the previously stored status is
+// emptied and the stored ref is updated.
+func checkAndUpdateTacAnn(tac *v1alpha1.TidbClusterAutoScaler) {
+ if tac.Annotations == nil {
+ tac.Annotations = map[string]string{}
+ resetAutoScalingAnn(tac)
+ return
+ }
+ name := tac.Annotations[label.AnnAutoScalingTargetName]
+ namespace := tac.Annotations[label.AnnAutoScalingTargetNamespace]
+ if name == tac.Spec.Cluster.Name && namespace == tac.Spec.Cluster.Namespace {
+ return
+ }
+ // If not satisfied, reset tac Ann
+ resetAutoScalingAnn(tac)
+}
+
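+// genMetricsEndpoint returns the Prometheus address to query: the explicitly configured
+// MetricsUrl if set, otherwise the TidbMonitor service at http://<name>-prometheus.<namespace>.svc:9090.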
+func genMetricsEndpoint(tac *v1alpha1.TidbClusterAutoScaler) (string, error) {
+ if tac.Spec.MetricsUrl == nil && tac.Spec.Monitor == nil {
+ return "", fmt.Errorf("tac[%s/%s] metrics url or monitor should be defined explicitly", tac.Namespace, tac.Name)
+ }
+ if tac.Spec.MetricsUrl != nil {
+ return *tac.Spec.MetricsUrl, nil
}
+ return fmt.Sprintf("http://%s-prometheus.%s.svc:9090", tac.Spec.Monitor.Name, tac.Spec.Monitor.Namespace), nil
}
diff --git a/pkg/autoscaler/autoscaler/util_test.go b/pkg/autoscaler/autoscaler/util_test.go
new file mode 100644
index 0000000000..6668dcf070
--- /dev/null
+++ b/pkg/autoscaler/autoscaler/util_test.go
@@ -0,0 +1,404 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autoscaler
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ . "github.com/onsi/gomega"
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/label"
+ appsv1 "k8s.io/api/apps/v1"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
+ "k8s.io/utils/pointer"
+)
+
+func TestCheckStsAutoScalingInterval(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tests := []struct {
+ name string
+ memberType v1alpha1.MemberType
+ HaveScaled bool
+ LastScaleIntervalSec int
+ expectedPermitScaling bool
+ }{
+ {
+ name: "tikv, first scaling",
+ memberType: v1alpha1.TiKVMemberType,
+ HaveScaled: false,
+ LastScaleIntervalSec: 0,
+ expectedPermitScaling: true,
+ },
+ {
+ name: "tikv, scaling 60 secs ago",
+ memberType: v1alpha1.TiKVMemberType,
+ HaveScaled: true,
+ LastScaleIntervalSec: 60,
+ expectedPermitScaling: false,
+ },
+ {
+ name: "tidb, first scaling",
+ memberType: v1alpha1.TiDBMemberType,
+ HaveScaled: false,
+ LastScaleIntervalSec: 0,
+ expectedPermitScaling: true,
+ },
+ {
+ name: "tidb, scaling 60 secs ago",
+ memberType: v1alpha1.TiDBMemberType,
+ HaveScaled: true,
+ LastScaleIntervalSec: 60,
+ expectedPermitScaling: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+
+ tac := newTidbClusterAutoScaler()
+ intervalSec := int32(100)
+ if tt.memberType == v1alpha1.TiKVMemberType {
+ if !tt.HaveScaled {
+ tac.Annotations = map[string]string{}
+ } else {
+ d := time.Duration(tt.LastScaleIntervalSec) * time.Second
+ tac.Annotations[label.AnnTiKVLastAutoScalingTimestamp] = fmt.Sprintf("%d", time.Now().Add(-d).Unix())
+ }
+ } else if tt.memberType == v1alpha1.TiDBMemberType {
+ if !tt.HaveScaled {
+ tac.Annotations = map[string]string{}
+ } else {
+ d := time.Duration(tt.LastScaleIntervalSec) * time.Second
+ tac.Annotations[label.AnnTiDBLastAutoScalingTimestamp] = fmt.Sprintf("%d", time.Now().Add(-d).Unix())
+ }
+ }
+ r, err := checkStsAutoScalingInterval(tac, intervalSec, tt.memberType)
+ g.Expect(err).Should(BeNil())
+ g.Expect(r).Should(Equal(tt.expectedPermitScaling))
+ })
+
+ }
+}
+
+func TestCheckStsAutoScalingPrerequisites(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tests := []struct {
+ name string
+ stsUpdating bool
+ stsScaling bool
+ expectedCheckResult bool
+ }{
+ {
+ name: "upgrading",
+ stsUpdating: true,
+ stsScaling: false,
+ expectedCheckResult: false,
+ },
+ {
+ name: "scaling",
+ stsUpdating: false,
+ stsScaling: true,
+ expectedCheckResult: false,
+ },
+ {
+ name: "no upgrading, no scaling",
+ stsUpdating: false,
+ stsScaling: false,
+ expectedCheckResult: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ sts := newSts()
+ if tt.stsUpdating {
+ sts.Status.UpdateRevision = "1"
+ sts.Status.CurrentRevision = "2"
+ } else {
+ sts.Status.UpdateRevision = "1"
+ sts.Status.CurrentRevision = "1"
+ }
+ if tt.stsScaling {
+ sts.Spec.Replicas = pointer.Int32Ptr(1)
+ sts.Status.Replicas = 2
+ } else {
+ sts.Spec.Replicas = pointer.Int32Ptr(1)
+ sts.Status.Replicas = 1
+ }
+ r := checkStsAutoScalingPrerequisites(sts)
+ g.Expect(r).Should(Equal(tt.expectedCheckResult))
+ })
+ }
+
+}
+
+func TestLimitTargetReplicas(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tests := []struct {
+ name string
+ targetReplicas int32
+ minReplicas int32
+ maxReplicas int32
+ memberType v1alpha1.MemberType
+ expectedReplicas int32
+ }{
+ {
+ name: "tikv,smaller than min",
+ targetReplicas: 1,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiKVMemberType,
+ expectedReplicas: 2,
+ },
+ {
+ name: "tikv,equal min",
+ targetReplicas: 2,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiKVMemberType,
+ expectedReplicas: 2,
+ },
+ {
+ name: "tikv,bigger than min, smaller than max",
+ targetReplicas: 3,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiKVMemberType,
+ expectedReplicas: 3,
+ },
+ {
+ name: "tikv,equal max",
+ targetReplicas: 4,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiKVMemberType,
+ expectedReplicas: 4,
+ },
+ {
+ name: "tikv,greater than max",
+ targetReplicas: 5,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiKVMemberType,
+ expectedReplicas: 4,
+ },
+ //tidb
+ {
+ name: "tidb,smaller than min",
+ targetReplicas: 1,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiDBMemberType,
+ expectedReplicas: 2,
+ },
+ {
+ name: "tidb,equal min",
+ targetReplicas: 2,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiDBMemberType,
+ expectedReplicas: 2,
+ },
+ {
+ name: "tidb,bigger than min, smaller than max",
+ targetReplicas: 3,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiDBMemberType,
+ expectedReplicas: 3,
+ },
+ {
+ name: "tidb,equal max",
+ targetReplicas: 4,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiDBMemberType,
+ expectedReplicas: 4,
+ },
+ {
+ name: "tidb,greater than max",
+ targetReplicas: 5,
+ minReplicas: 2,
+ maxReplicas: 4,
+ memberType: v1alpha1.TiDBMemberType,
+ expectedReplicas: 4,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tac := newTidbClusterAutoScaler()
+ if tt.memberType == v1alpha1.TiKVMemberType {
+ tac.Spec.TiKV.MinReplicas = pointer.Int32Ptr(tt.minReplicas)
+ tac.Spec.TiKV.MaxReplicas = tt.maxReplicas
+ } else if tt.memberType == v1alpha1.TiDBMemberType {
+ tac.Spec.TiDB.MinReplicas = pointer.Int32Ptr(tt.minReplicas)
+ tac.Spec.TiDB.MaxReplicas = tt.maxReplicas
+ }
+ r := limitTargetReplicas(tt.targetReplicas, tac, tt.memberType)
+ g.Expect(tt.expectedReplicas).Should(Equal(r))
+ })
+ }
+}
+
+func TestDefaultTac(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tac := newTidbClusterAutoScaler()
+ tac.Spec.TiDB = nil
+ tac.Spec.TiKV.MinReplicas = nil
+ tac.Spec.TiKV.Metrics = []autoscalingv2beta2.MetricSpec{}
+ tac.Spec.TiKV.MetricsTimeDuration = nil
+ tac.Spec.TiKV.ScaleOutIntervalSeconds = nil
+ tac.Spec.TiKV.ScaleInIntervalSeconds = nil
+ defaultTAC(tac)
+ g.Expect(*tac.Spec.TiKV.MinReplicas).Should(Equal(int32(1)))
+ g.Expect(len(tac.Spec.TiKV.Metrics)).Should(Equal(1))
+ g.Expect(*tac.Spec.TiKV.MetricsTimeDuration).Should(Equal("3m"))
+ g.Expect(*tac.Spec.TiKV.ScaleOutIntervalSeconds).Should(Equal(int32(300)))
+ g.Expect(*tac.Spec.TiKV.ScaleInIntervalSeconds).Should(Equal(int32(500)))
+
+ tac = newTidbClusterAutoScaler()
+ tac.Spec.TiKV = nil
+ tac.Spec.TiDB.MinReplicas = nil
+ tac.Spec.TiDB.Metrics = []autoscalingv2beta2.MetricSpec{}
+ tac.Spec.TiDB.MetricsTimeDuration = nil
+ tac.Spec.TiDB.ScaleOutIntervalSeconds = nil
+ tac.Spec.TiDB.ScaleInIntervalSeconds = nil
+ defaultTAC(tac)
+ g.Expect(*tac.Spec.TiDB.MinReplicas).Should(Equal(int32(1)))
+ g.Expect(len(tac.Spec.TiDB.Metrics)).Should(Equal(1))
+ g.Expect(*tac.Spec.TiDB.MetricsTimeDuration).Should(Equal("3m"))
+ g.Expect(*tac.Spec.TiDB.ScaleOutIntervalSeconds).Should(Equal(int32(300)))
+ g.Expect(*tac.Spec.TiDB.ScaleInIntervalSeconds).Should(Equal(int32(500)))
+
+}
+
+func TestCheckAndUpdateTacAnn(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tests := []struct {
+ name string
+ haveScaling bool
+ targetNamespace string
+ targetName string
+ markedNamespace string
+ markedName string
+ }{
+ {
+ name: "first syncing",
+ haveScaling: false,
+ markedName: "",
+ markedNamespace: "",
+ targetName: "foo",
+ targetNamespace: "bar",
+ },
+ {
+ name: "second syncing",
+ haveScaling: true,
+ markedName: "foo",
+ markedNamespace: "bar",
+ targetName: "foo",
+ targetNamespace: "bar",
+ },
+ {
+ name: "change target",
+ haveScaling: true,
+ markedName: "foo",
+ markedNamespace: "bar",
+ targetName: "foo2",
+ targetNamespace: "bar2",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tac := newTidbClusterAutoScaler()
+ tac.Annotations = nil
+ tac.Spec.Cluster.Name = tt.targetName
+ tac.Spec.Cluster.Namespace = tt.targetNamespace
+ if tt.haveScaling {
+ tac.Annotations = map[string]string{}
+ tac.Annotations[label.AnnAutoScalingTargetNamespace] = tt.markedNamespace
+ tac.Annotations[label.AnnAutoScalingTargetName] = tt.markedName
+ }
+ checkAndUpdateTacAnn(tac)
+ v, ok := tac.Annotations[label.AnnAutoScalingTargetNamespace]
+ g.Expect(ok).Should(Equal(true))
+ g.Expect(v).Should(Equal(tt.targetNamespace))
+ v, ok = tac.Annotations[label.AnnAutoScalingTargetName]
+ g.Expect(ok).Should(Equal(true))
+ g.Expect(v).Should(Equal(tt.targetName))
+ })
+ }
+}
+
+func TestGenMetricsEndpoint(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tac := newTidbClusterAutoScaler()
+ tac.Spec.Monitor = nil
+ r, err := genMetricsEndpoint(tac)
+ g.Expect(err).ShouldNot(BeNil())
+ g.Expect(err.Error()).Should(Equal(fmt.Sprintf("tac[%s/%s] metrics url or monitor should be defined explicitly", tac.Namespace, tac.Name)))
+ g.Expect(r).Should(Equal(""))
+
+ tac.Spec.Monitor = &v1alpha1.TidbMonitorRef{
+ Name: "monitor",
+ Namespace: "default",
+ }
+ r, err = genMetricsEndpoint(tac)
+ g.Expect(err).Should(BeNil())
+ g.Expect(r).Should(Equal(fmt.Sprintf("http://%s-prometheus.%s.svc:9090", tac.Spec.Monitor.Name, tac.Spec.Monitor.Namespace)))
+
+ u := "metrics-url"
+ tac.Spec.MetricsUrl = &u
+ r, err = genMetricsEndpoint(tac)
+ g.Expect(err).Should(BeNil())
+ g.Expect(r).Should(Equal(u))
+}
+
+func newTidbClusterAutoScaler() *v1alpha1.TidbClusterAutoScaler {
+ tac := &v1alpha1.TidbClusterAutoScaler{}
+ tac.Name = "tac"
+ tac.Namespace = "default"
+ tac.Annotations = map[string]string{}
+ tac.Spec.Cluster = v1alpha1.TidbClusterRef{
+ Name: "tc",
+ Namespace: "default",
+ }
+ tac.Spec.Monitor = &v1alpha1.TidbMonitorRef{
+ Namespace: "monitor",
+ Name: "default",
+ }
+ tac.Spec.TiKV = &v1alpha1.TikvAutoScalerSpec{}
+ tac.Spec.TiDB = &v1alpha1.TidbAutoScalerSpec{}
+ tac.Spec.TiKV.ScaleOutThreshold = pointer.Int32Ptr(2)
+ tac.Spec.TiKV.ScaleInThreshold = pointer.Int32Ptr(2)
+ tac.Spec.TiDB.ScaleOutThreshold = pointer.Int32Ptr(2)
+ tac.Spec.TiDB.ScaleInThreshold = pointer.Int32Ptr(2)
+ return tac
+}
+
+func newSts() *appsv1.StatefulSet {
+ return &appsv1.StatefulSet{
+ Spec: appsv1.StatefulSetSpec{
+ Replicas: pointer.Int32Ptr(1),
+ },
+ Status: appsv1.StatefulSetStatus{
+ CurrentRevision: "1",
+ UpdateRevision: "2",
+ Replicas: 2,
+ },
+ }
+}
diff --git a/pkg/backup/backup/backup_cleaner.go b/pkg/backup/backup/backup_cleaner.go
index 4b90d5874b..81df6426e6 100644
--- a/pkg/backup/backup/backup_cleaner.go
+++ b/pkg/backup/backup/backup_cleaner.go
@@ -24,9 +24,9 @@ import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
- corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// BackupCleaner implements the logic for cleaning backup
@@ -36,7 +36,7 @@ type BackupCleaner interface {
type backupCleaner struct {
statusUpdater controller.BackupConditionUpdaterInterface
- secretLister corelisters.SecretLister
+ kubeCli kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
}
@@ -44,12 +44,12 @@ type backupCleaner struct {
// NewBackupCleaner returns a BackupCleaner
func NewBackupCleaner(
statusUpdater controller.BackupConditionUpdaterInterface,
- secretLister corelisters.SecretLister,
+ kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface) BackupCleaner {
return &backupCleaner{
statusUpdater,
- secretLister,
+ kubeCli,
jobLister,
jobControl,
}
@@ -63,7 +63,7 @@ func (bc *backupCleaner) Clean(backup *v1alpha1.Backup) error {
ns := backup.GetNamespace()
name := backup.GetName()
- glog.Infof("start to clean backup %s/%s", ns, name)
+ klog.Infof("start to clean backup %s/%s", ns, name)
cleanJobName := backup.GetCleanJobName()
_, err := bc.jobLister.Jobs(ns).Get(cleanJobName)
@@ -112,7 +112,7 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st
ns := backup.GetNamespace()
name := backup.GetName()
- storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bc.secretLister)
+ storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bc.kubeCli)
if err != nil {
return nil, reason, err
}
@@ -123,20 +123,24 @@ func (bc *backupCleaner) makeCleanJob(backup *v1alpha1.Backup) (*batchv1.Job, st
fmt.Sprintf("--backupName=%s", name),
}
+ serviceAccount := constants.DefaultServiceAccountName
+ if backup.Spec.ServiceAccount != "" {
+ serviceAccount = backup.Spec.ServiceAccount
+ }
backupLabel := label.NewBackup().Instance(backup.GetInstanceName()).CleanJob().Backup(name)
-
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: backupLabel.Labels(),
+ Labels: backupLabel.Labels(),
+ Annotations: backup.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.BackupJobLabelVal,
Image: controller.TidbBackupManagerImage,
Args: args,
- ImagePullPolicy: corev1.PullAlways,
+ ImagePullPolicy: corev1.PullIfNotPresent,
Env: storageEnv,
},
},
diff --git a/pkg/backup/backup/backup_manager.go b/pkg/backup/backup/backup_manager.go
index 63bfada8ec..56b45ba7f8 100644
--- a/pkg/backup/backup/backup_manager.go
+++ b/pkg/backup/backup/backup_manager.go
@@ -15,18 +15,22 @@ package backup
import (
"fmt"
+ "strings"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/backup"
"github.com/pingcap/tidb-operator/pkg/backup/constants"
backuputil "github.com/pingcap/tidb-operator/pkg/backup/util"
+ v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
corelisters "k8s.io/client-go/listers/core/v1"
)
@@ -34,10 +38,11 @@ import (
type backupManager struct {
backupCleaner BackupCleaner
statusUpdater controller.BackupConditionUpdaterInterface
- secretLister corelisters.SecretLister
+ kubeCli kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
pvcLister corelisters.PersistentVolumeClaimLister
+ tcLister v1alpha1listers.TidbClusterLister
pvcControl controller.GeneralPVCControlInterface
}
@@ -45,19 +50,21 @@ type backupManager struct {
func NewBackupManager(
backupCleaner BackupCleaner,
statusUpdater controller.BackupConditionUpdaterInterface,
- secretLister corelisters.SecretLister,
+ kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface,
pvcLister corelisters.PersistentVolumeClaimLister,
+ tcLister v1alpha1listers.TidbClusterLister,
pvcControl controller.GeneralPVCControlInterface,
) backup.BackupManager {
return &backupManager{
backupCleaner,
statusUpdater,
- secretLister,
+ kubeCli,
jobLister,
jobControl,
pvcLister,
+ tcLister,
pvcControl,
}
}
@@ -163,16 +170,15 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
ns := backup.GetNamespace()
name := backup.GetName()
- envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, bm.secretLister)
+ envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.kubeCli)
if err != nil {
return nil, reason, err
}
- storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.secretLister)
+ storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
}
-
envVars = append(envVars, storageEnv...)
// TODO: make pvc request storage size configurable
reason, err = bm.ensureBackupPVCExist(backup)
@@ -188,29 +194,30 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
args := []string{
"export",
fmt.Sprintf("--namespace=%s", ns),
- fmt.Sprintf("--host=%s", backup.Spec.From.Host),
- fmt.Sprintf("--port=%d", backup.Spec.From.Port),
- fmt.Sprintf("--user=%s", backup.Spec.From.User),
- fmt.Sprintf("--bucket=%s", bucketName),
fmt.Sprintf("--backupName=%s", name),
+ fmt.Sprintf("--bucket=%s", bucketName),
fmt.Sprintf("--storageType=%s", backuputil.GetStorageType(backup.Spec.StorageProvider)),
}
+ serviceAccount := constants.DefaultServiceAccountName
+ if backup.Spec.ServiceAccount != "" {
+ serviceAccount = backup.Spec.ServiceAccount
+ }
backupLabel := label.NewBackup().Instance(backup.GetInstanceName()).BackupJob().Backup(name)
-
// TODO: need add ResourceRequirement for backup job
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: backupLabel.Labels(),
+ Labels: backupLabel.Labels(),
+ Annotations: backup.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.BackupJobLabelVal,
Image: controller.TidbBackupManagerImage,
Args: args,
- ImagePullPolicy: corev1.PullAlways,
+ ImagePullPolicy: corev1.PullIfNotPresent,
VolumeMounts: []corev1.VolumeMount{
{Name: label.BackupJobLabelVal, MountPath: constants.BackupRootPath},
},
@@ -218,6 +225,8 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
},
},
RestartPolicy: corev1.RestartPolicyNever,
+ Affinity: backup.Spec.Affinity,
+ Tolerations: backup.Spec.Tolerations,
Volumes: []corev1.Volume{
{
Name: label.BackupJobLabelVal,
@@ -248,39 +257,116 @@ func (bm *backupManager) makeExportJob(backup *v1alpha1.Backup) (*batchv1.Job, s
return job, "", nil
}
+
func (bm *backupManager) makeBackupJob(backup *v1alpha1.Backup) (*batchv1.Job, string, error) {
ns := backup.GetNamespace()
name := backup.GetName()
+ backupNamespace := ns
+ if backup.Spec.BR.ClusterNamespace != "" {
+ backupNamespace = backup.Spec.BR.ClusterNamespace
+ }
+ tc, err := bm.tcLister.TidbClusters(backupNamespace).Get(backup.Spec.BR.Cluster)
+ if err != nil {
+ return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", backupNamespace, backup.Spec.BR.Cluster), err
+ }
+
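+ // derive the TiKV version from the image tag (e.g. "pingcap/tikv:v4.0.0" -> "v4.0.0"); it is passed to the backup job via --tikvVersion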
+ var tikvVersion string
+ tikvImage := tc.TiKVImage()
+ imageVersion := strings.Split(tikvImage, ":")
+ if len(imageVersion) == 2 {
+ tikvVersion = imageVersion[1]
+ }
+
+ envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, backup.Spec.From.SecretName, backup.Spec.UseKMS, bm.kubeCli)
+ if err != nil {
+ return nil, reason, err
+ }
- envVars, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.StorageProvider, bm.secretLister)
+ storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, backup.Spec.UseKMS, backup.Spec.StorageProvider, bm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("backup %s/%s, %v", ns, name, err)
}
+ envVars = append(envVars, storageEnv...)
+ envVars = append(envVars, corev1.EnvVar{
+ Name: "BR_LOG_TO_TERM",
+ Value: "1",
+ })
+
args := []string{
"backup",
fmt.Sprintf("--namespace=%s", ns),
fmt.Sprintf("--backupName=%s", name),
}
+ if tikvVersion != "" {
+ args = append(args, fmt.Sprintf("--tikvVersion=%s", tikvVersion))
+ }
backupLabel := label.NewBackup().Instance(backup.GetInstanceName()).BackupJob().Backup(name)
+ volumeMounts := []corev1.VolumeMount{}
+ volumes := []corev1.Volume{}
+ if tc.Spec.TLSCluster != nil && tc.Spec.TLSCluster.Enabled {
+ args = append(args, "--cluster-tls=true")
+ volumeMounts = append(volumeMounts, corev1.VolumeMount{
+ Name: "cluster-client-tls",
+ ReadOnly: true,
+ MountPath: util.ClusterClientTLSPath,
+ })
+ volumes = append(volumes, corev1.Volume{
+ Name: "cluster-client-tls",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: util.ClusterClientTLSSecretName(backup.Spec.BR.Cluster),
+ },
+ },
+ })
+ }
+ if tc.Spec.TiDB.TLSClient != nil && tc.Spec.TiDB.TLSClient.Enabled && !tc.SkipTLSWhenConnectTiDB() {
+ args = append(args, "--client-tls=true")
+ clientSecretName := util.TiDBClientTLSSecretName(backup.Spec.BR.Cluster)
+ if backup.Spec.From.TLSClient != nil && backup.Spec.From.TLSClient.TLSSecret != "" {
+ clientSecretName = backup.Spec.From.TLSClient.TLSSecret
+ }
+ volumeMounts = append(volumeMounts, corev1.VolumeMount{
+ Name: "tidb-client-tls",
+ ReadOnly: true,
+ MountPath: util.TiDBClientTLSPath,
+ })
+ volumes = append(volumes, corev1.Volume{
+ Name: "tidb-client-tls",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: clientSecretName,
+ },
+ },
+ })
+ }
+ serviceAccount := constants.DefaultServiceAccountName
+ if backup.Spec.ServiceAccount != "" {
+ serviceAccount = backup.Spec.ServiceAccount
+ }
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: backupLabel.Labels(),
+ Labels: backupLabel.Labels(),
+ Annotations: backup.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.BackupJobLabelVal,
Image: controller.TidbBackupManagerImage,
Args: args,
- ImagePullPolicy: corev1.PullAlways,
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ VolumeMounts: volumeMounts,
Env: envVars,
},
},
RestartPolicy: corev1.RestartPolicyNever,
+ Affinity: backup.Spec.Affinity,
+ Tolerations: backup.Spec.Tolerations,
+ Volumes: volumes,
},
}
diff --git a/pkg/backup/backupschedule/backup_schedule_manager.go b/pkg/backup/backupschedule/backup_schedule_manager.go
index 9725ac21e5..d4eb31a3b5 100644
--- a/pkg/backup/backupschedule/backup_schedule_manager.go
+++ b/pkg/backup/backupschedule/backup_schedule_manager.go
@@ -30,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
batchlisters "k8s.io/client-go/listers/batch/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
type backupScheduleManager struct {
@@ -160,7 +160,7 @@ func getLastScheduledTime(bs *v1alpha1.BackupSchedule) (*time.Time, error) {
now := time.Now()
if earliestTime.After(now) {
// timestamp fallback, waiting for the next backup schedule period
- glog.Errorf("backup schedule %s/%s timestamp fallback, lastBackupTime: %s, now: %s",
+ klog.Errorf("backup schedule %s/%s timestamp fallback, lastBackupTime: %s, now: %s",
ns, bsName, earliestTime.Format(time.RFC3339), now.Format(time.RFC3339))
return nil, nil
}
@@ -184,13 +184,13 @@ func getLastScheduledTime(bs *v1alpha1.BackupSchedule) (*time.Time, error) {
bs.Status.AllBackupCleanTime = &metav1.Time{Time: time.Now()}
return nil, controller.RequeueErrorf("recovery backup schedule %s/%s from pause status, refresh AllBackupCleanTime.", ns, bsName)
}
- glog.Errorf("Too many missed start backup schedule time (> 100). Check the clock.")
+ klog.Errorf("Too many missed start backup schedule time (> 100). Check the clock.")
return nil, nil
}
}
if len(scheduledTimes) == 0 {
- glog.V(4).Infof("unmet backup schedule %s/%s start time, waiting for the next backup schedule period", ns, bsName)
+ klog.V(4).Infof("unmet backup schedule %s/%s start time, waiting for the next backup schedule period", ns, bsName)
return nil, nil
}
scheduledTime := scheduledTimes[len(scheduledTimes)-1]
@@ -215,9 +215,16 @@ func (bm *backupScheduleManager) createBackup(bs *v1alpha1.BackupSchedule, times
}
}
} else {
+ var pdAddress, clusterNamespace string
+ clusterNamespace = backupSpec.BR.ClusterNamespace
+ if backupSpec.BR.ClusterNamespace == "" {
+ clusterNamespace = ns
+ }
+ pdAddress = fmt.Sprintf("%s-pd.%s:2379", backupSpec.BR.Cluster, clusterNamespace)
+
if backupSpec.S3 != nil {
backupSpec.S3.Prefix = path.Join(backupSpec.S3.Prefix,
- strings.ReplaceAll(backupSpec.BR.PDAddress, ":", "-")+"-"+timestamp.UTC().Format(constants.TimeFormat))
+ strings.ReplaceAll(pdAddress, ":", "-")+"-"+timestamp.UTC().Format(constants.TimeFormat))
}
}
@@ -226,9 +233,10 @@ func (bm *backupScheduleManager) createBackup(bs *v1alpha1.BackupSchedule, times
backup := &v1alpha1.Backup{
Spec: backupSpec,
ObjectMeta: metav1.ObjectMeta{
- Namespace: ns,
- Name: bs.GetBackupCRDName(timestamp),
- Labels: bsLabel.Labels(),
+ Namespace: ns,
+ Name: bs.GetBackupCRDName(timestamp),
+ Labels: bsLabel.Labels(),
+ Annotations: bs.Annotations,
OwnerReferences: []metav1.OwnerReference{
controller.GetBackupScheduleOwnerRef(bs),
},
@@ -253,7 +261,7 @@ func (bm *backupScheduleManager) backupGC(bs *v1alpha1.BackupSchedule) {
return
}
// TODO: When the backup schedule gc policy is not set, we should set a default backup gc policy.
- glog.Warningf("backup schedule %s/%s does not set backup gc policy", ns, bsName)
+ klog.Warningf("backup schedule %s/%s does not set backup gc policy", ns, bsName)
}
func (bm *backupScheduleManager) backupGCByMaxReservedTime(bs *v1alpha1.BackupSchedule) {
@@ -262,13 +270,13 @@ func (bm *backupScheduleManager) backupGCByMaxReservedTime(bs *v1alpha1.BackupSc
reservedTime, err := time.ParseDuration(*bs.Spec.MaxReservedTime)
if err != nil {
- glog.Errorf("backup schedule %s/%s, invalid MaxReservedTime %s", ns, bsName, *bs.Spec.MaxReservedTime)
+ klog.Errorf("backup schedule %s/%s, invalid MaxReservedTime %s", ns, bsName, *bs.Spec.MaxReservedTime)
return
}
backupsList, err := bm.getBackupList(bs, false)
if err != nil {
- glog.Errorf("backupGCByMaxReservedTime, err: %s", err)
+ klog.Errorf("backupGCByMaxReservedTime, err: %s", err)
return
}
@@ -279,11 +287,11 @@ func (bm *backupScheduleManager) backupGCByMaxReservedTime(bs *v1alpha1.BackupSc
}
// delete the expired backup
if err := bm.backupControl.DeleteBackup(backup); err != nil {
- glog.Errorf("backup schedule %s/%s gc backup %s failed, err %v", ns, bsName, backup.GetName(), err)
+ klog.Errorf("backup schedule %s/%s gc backup %s failed, err %v", ns, bsName, backup.GetName(), err)
return
}
deleteCount += 1
- glog.Infof("backup schedule %s/%s gc backup %s success", ns, bsName, backup.GetName())
+ klog.Infof("backup schedule %s/%s gc backup %s success", ns, bsName, backup.GetName())
}
if deleteCount == len(backupsList) {
@@ -300,7 +308,7 @@ func (bm *backupScheduleManager) backupGCByMaxBackups(bs *v1alpha1.BackupSchedul
backupsList, err := bm.getBackupList(bs, true)
if err != nil {
- glog.Errorf("backupGCByMaxBackups failed, err: %s", err)
+ klog.Errorf("backupGCByMaxBackups failed, err: %s", err)
return
}
@@ -311,11 +319,11 @@ func (bm *backupScheduleManager) backupGCByMaxBackups(bs *v1alpha1.BackupSchedul
}
// delete the backup
if err := bm.backupControl.DeleteBackup(backup); err != nil {
- glog.Errorf("backup schedule %s/%s gc backup %s failed, err %v", ns, bsName, backup.GetName(), err)
+ klog.Errorf("backup schedule %s/%s gc backup %s failed, err %v", ns, bsName, backup.GetName(), err)
return
}
deleteCount += 1
- glog.Infof("backup schedule %s/%s gc backup %s success", ns, bsName, backup.GetName())
+ klog.Infof("backup schedule %s/%s gc backup %s success", ns, bsName, backup.GetName())
}
if deleteCount == len(backupsList) {
diff --git a/pkg/backup/constants/constants.go b/pkg/backup/constants/constants.go
index 82ad258a12..ce4133c616 100644
--- a/pkg/backup/constants/constants.go
+++ b/pkg/backup/constants/constants.go
@@ -49,4 +49,13 @@ const (
// BackupManagerEnvVarPrefix represents the environment variable used for tidb-backup-manager must include this prefix
BackupManagerEnvVarPrefix = "BACKUP_MANAGER"
+
+ // BR certificate storage path
+ BRCertPath = "/var/lib/br-tls"
+
+ // ServiceAccountCAPath is where the CABundle of the serviceaccount is located
+ ServiceAccountCAPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+
+ // KMS secret env prefix
+ KMSSecretPrefix = "KMS_ENCRYPTED"
)
diff --git a/pkg/backup/restore/restore_manager.go b/pkg/backup/restore/restore_manager.go
index 737c1cd445..ceef74cbc6 100644
--- a/pkg/backup/restore/restore_manager.go
+++ b/pkg/backup/restore/restore_manager.go
@@ -15,19 +15,23 @@ package restore
import (
"fmt"
+ "strings"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/backup"
"github.com/pingcap/tidb-operator/pkg/backup/constants"
backuputil "github.com/pingcap/tidb-operator/pkg/backup/util"
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
+ v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
batchlisters "k8s.io/client-go/listers/batch/v1"
corelisters "k8s.io/client-go/listers/core/v1"
)
@@ -35,10 +39,11 @@ import (
type restoreManager struct {
backupLister listers.BackupLister
statusUpdater controller.RestoreConditionUpdaterInterface
- secretLister corelisters.SecretLister
+ kubeCli kubernetes.Interface
jobLister batchlisters.JobLister
jobControl controller.JobControlInterface
pvcLister corelisters.PersistentVolumeClaimLister
+ tcLister v1alpha1listers.TidbClusterLister
pvcControl controller.GeneralPVCControlInterface
}
@@ -46,19 +51,21 @@ type restoreManager struct {
func NewRestoreManager(
backupLister listers.BackupLister,
statusUpdater controller.RestoreConditionUpdaterInterface,
- secretLister corelisters.SecretLister,
+ kubeCli kubernetes.Interface,
jobLister batchlisters.JobLister,
jobControl controller.JobControlInterface,
pvcLister corelisters.PersistentVolumeClaimLister,
+ tcLister v1alpha1listers.TidbClusterLister,
pvcControl controller.GeneralPVCControlInterface,
) backup.RestoreManager {
return &restoreManager{
backupLister,
statusUpdater,
- secretLister,
+ kubeCli,
jobLister,
jobControl,
pvcLister,
+ tcLister,
pvcControl,
}
}
@@ -154,12 +161,12 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
ns := restore.GetNamespace()
name := restore.GetName()
- envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, rm.secretLister)
+ envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.kubeCli)
if err != nil {
return nil, reason, err
}
- storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.StorageProvider, rm.secretLister)
+ storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err)
}
@@ -174,27 +181,29 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
"import",
fmt.Sprintf("--namespace=%s", ns),
fmt.Sprintf("--restoreName=%s", name),
- fmt.Sprintf("--host=%s", restore.Spec.To.Host),
- fmt.Sprintf("--port=%d", restore.Spec.To.Port),
- fmt.Sprintf("--user=%s", restore.Spec.To.User),
fmt.Sprintf("--backupPath=%s", backupPath),
}
restoreLabel := label.NewBackup().Instance(restore.GetInstanceName()).RestoreJob().Restore(name)
+ serviceAccount := constants.DefaultServiceAccountName
+ if restore.Spec.ServiceAccount != "" {
+ serviceAccount = restore.Spec.ServiceAccount
+ }
// TODO: need add ResourceRequirement for restore job
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: restoreLabel.Labels(),
+ Labels: restoreLabel.Labels(),
+ Annotations: restore.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.RestoreJobLabelVal,
Image: controller.TidbBackupManagerImage,
Args: args,
- ImagePullPolicy: corev1.PullAlways,
+ ImagePullPolicy: corev1.PullIfNotPresent,
VolumeMounts: []corev1.VolumeMount{
{Name: label.RestoreJobLabelVal, MountPath: constants.BackupRootPath},
},
@@ -202,6 +211,8 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
},
},
RestartPolicy: corev1.RestartPolicyNever,
+ Affinity: restore.Spec.Affinity,
+ Tolerations: restore.Spec.Tolerations,
Volumes: []corev1.Volume{
{
Name: label.RestoreJobLabelVal,
@@ -235,36 +246,110 @@ func (rm *restoreManager) makeImportJob(restore *v1alpha1.Restore) (*batchv1.Job
func (rm *restoreManager) makeRestoreJob(restore *v1alpha1.Restore) (*batchv1.Job, string, error) {
ns := restore.GetNamespace()
name := restore.GetName()
+ restoreNamespace := ns
+ if restore.Spec.BR.ClusterNamespace != "" {
+ restoreNamespace = restore.Spec.BR.ClusterNamespace
+ }
+ tc, err := rm.tcLister.TidbClusters(restoreNamespace).Get(restore.Spec.BR.Cluster)
+ if err != nil {
+ return nil, fmt.Sprintf("failed to fetch tidbcluster %s/%s", restoreNamespace, restore.Spec.BR.Cluster), err
+ }
+
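+ // Derive the TiKV version from the image tag (the part after ":"), so it can be passed to the restore job via --tikvVersion.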
+ var tikvVersion string
+ tikvImage := tc.TiKVImage()
+ imageVersion := strings.Split(tikvImage, ":")
+ if len(imageVersion) == 2 {
+ tikvVersion = imageVersion[1]
+ }
+
+ envVars, reason, err := backuputil.GenerateTidbPasswordEnv(ns, name, restore.Spec.To.SecretName, restore.Spec.UseKMS, rm.kubeCli)
+ if err != nil {
+ return nil, reason, err
+ }
- envVars, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.StorageProvider, rm.secretLister)
+ storageEnv, reason, err := backuputil.GenerateStorageCertEnv(ns, restore.Spec.UseKMS, restore.Spec.StorageProvider, rm.kubeCli)
if err != nil {
return nil, reason, fmt.Errorf("restore %s/%s, %v", ns, name, err)
}
+ envVars = append(envVars, storageEnv...)
+ envVars = append(envVars, corev1.EnvVar{
+ Name: "BR_LOG_TO_TERM",
+ Value: "1",
+ })
args := []string{
"restore",
fmt.Sprintf("--namespace=%s", ns),
fmt.Sprintf("--restoreName=%s", name),
}
+ if tikvVersion != "" {
+ args = append(args, fmt.Sprintf("--tikvVersion=%s", tikvVersion))
+ }
restoreLabel := label.NewBackup().Instance(restore.GetInstanceName()).RestoreJob().Restore(name)
+ volumeMounts := []corev1.VolumeMount{}
+ volumes := []corev1.Volume{}
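+ // When cluster-internal TLS is enabled, pass --cluster-tls and mount the cluster client TLS secret into the restore job.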
+ if tc.Spec.TLSCluster != nil && tc.Spec.TLSCluster.Enabled {
+ args = append(args, "--cluster-tls=true")
+ volumeMounts = append(volumeMounts, corev1.VolumeMount{
+ Name: "cluster-client-tls",
+ ReadOnly: true,
+ MountPath: util.ClusterClientTLSPath,
+ })
+ volumes = append(volumes, corev1.Volume{
+ Name: "cluster-client-tls",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: util.ClusterClientTLSSecretName(restore.Spec.BR.Cluster),
+ },
+ },
+ })
+ }
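+ // When TLS is required for TiDB client connections, mount the TiDB client TLS secret; a secret specified on the restore spec takes precedence.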
+ if tc.Spec.TiDB.TLSClient != nil && tc.Spec.TiDB.TLSClient.Enabled && !tc.SkipTLSWhenConnectTiDB() {
+ args = append(args, "--client-tls=true")
+ clientSecretName := util.TiDBClientTLSSecretName(restore.Spec.BR.Cluster)
+ if restore.Spec.To.TLSClient != nil && restore.Spec.To.TLSClient.TLSSecret != "" {
+ clientSecretName = restore.Spec.To.TLSClient.TLSSecret
+ }
+ volumeMounts = append(volumeMounts, corev1.VolumeMount{
+ Name: "tidb-client-tls",
+ ReadOnly: true,
+ MountPath: util.TiDBClientTLSPath,
+ })
+ volumes = append(volumes, corev1.Volume{
+ Name: "tidb-client-tls",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: clientSecretName,
+ },
+ },
+ })
+ }
+
+ serviceAccount := constants.DefaultServiceAccountName
+ if restore.Spec.ServiceAccount != "" {
+ serviceAccount = restore.Spec.ServiceAccount
+ }
// TODO: need add ResourceRequirement for restore job
podSpec := &corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: restoreLabel.Labels(),
+ Labels: restoreLabel.Labels(),
+ Annotations: restore.Annotations,
},
Spec: corev1.PodSpec{
- ServiceAccountName: constants.DefaultServiceAccountName,
+ ServiceAccountName: serviceAccount,
Containers: []corev1.Container{
{
Name: label.RestoreJobLabelVal,
Image: controller.TidbBackupManagerImage,
Args: args,
- ImagePullPolicy: corev1.PullAlways,
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ VolumeMounts: volumeMounts,
Env: envVars,
},
},
+ Volumes: volumes,
RestartPolicy: corev1.RestartPolicyNever,
},
}
@@ -332,3 +417,21 @@ func (rm *restoreManager) ensureRestorePVCExist(restore *v1alpha1.Restore) (stri
}
var _ backup.RestoreManager = &restoreManager{}
+
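+// FakeRestoreManager is a fake backup.RestoreManager whose Sync returns a preset error; it is intended for use in tests.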
+type FakeRestoreManager struct {
+ err error
+}
+
+func NewFakeRestoreManager() *FakeRestoreManager {
+ return &FakeRestoreManager{}
+}
+
+func (frm *FakeRestoreManager) SetSyncError(err error) {
+ frm.err = err
+}
+
+func (frm *FakeRestoreManager) Sync(_ *v1alpha1.Restore) error {
+ return frm.err
+}
+
+var _ backup.RestoreManager = &FakeRestoreManager{}
diff --git a/pkg/backup/util/util.go b/pkg/backup/util/util.go
index b22ebdefad..a656fcfeb5 100644
--- a/pkg/backup/util/util.go
+++ b/pkg/backup/util/util.go
@@ -22,7 +22,8 @@ import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/backup/constants"
corev1 "k8s.io/api/core/v1"
- corelisters "k8s.io/client-go/listers/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
)
// CheckAllKeysExistInSecret check if all keys are included in the specific secret
@@ -39,7 +40,7 @@ func CheckAllKeysExistInSecret(secret *corev1.Secret, keys ...string) (string, b
}
// GenerateS3CertEnvVar generate the env info in order to access S3 compliant storage
-func GenerateS3CertEnvVar(s3 *v1alpha1.S3StorageProvider) ([]corev1.EnvVar, string, error) {
+func GenerateS3CertEnvVar(s3 *v1alpha1.S3StorageProvider, useKMS bool) ([]corev1.EnvVar, string, error) {
var envVars []corev1.EnvVar
switch s3.Provider {
@@ -51,7 +52,7 @@ func GenerateS3CertEnvVar(s3 *v1alpha1.S3StorageProvider) ([]corev1.EnvVar, stri
break
}
if !strings.HasPrefix(s3.Endpoint, "http://") {
- return envVars, "InvalidS3Endpoint", fmt.Errorf("cenph endpoint URI %s must start with http://", s3.Endpoint)
+ return envVars, "InvalidS3Endpoint", fmt.Errorf("ceph endpoint URI %s must start with http://", s3.Endpoint)
}
case v1alpha1.S3StorageProviderTypeAWS:
// TODO: Check the storage class, if it is not a legal storage class, use the default storage class instead
@@ -86,24 +87,36 @@ func GenerateS3CertEnvVar(s3 *v1alpha1.S3StorageProvider) ([]corev1.EnvVar, stri
Name: "AWS_STORAGE_CLASS",
Value: s3.StorageClass,
},
- {
- Name: "AWS_ACCESS_KEY_ID",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretName},
- Key: constants.S3AccessKey,
+ }
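+ // When KMS is enabled, additionally export the bucket region as AWS_DEFAULT_REGION.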
+ if useKMS {
+ envVars = append(envVars, []corev1.EnvVar{
+ {
+ Name: "AWS_DEFAULT_REGION",
+ Value: s3.Region,
+ },
+ }...)
+ }
+ if s3.SecretName != "" {
+ envVars = append(envVars, []corev1.EnvVar{
+ {
+ Name: "AWS_ACCESS_KEY_ID",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretName},
+ Key: constants.S3AccessKey,
+ },
},
},
- },
- {
- Name: "AWS_SECRET_ACCESS_KEY",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretName},
- Key: constants.S3SecretKey,
+ {
+ Name: "AWS_SECRET_ACCESS_KEY",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{Name: s3.SecretName},
+ Key: constants.S3SecretKey,
+ },
},
},
- },
+ }...)
}
return envVars, "", nil
}
@@ -148,9 +161,10 @@ func GenerateGcsCertEnvVar(gcs *v1alpha1.GcsStorageProvider) ([]corev1.EnvVar, s
}
// GenerateStorageCertEnv generate the env info in order to access backend backup storage
-func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
+func GenerateStorageCertEnv(ns string, useKMS bool, provider v1alpha1.StorageProvider, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) {
var certEnv []corev1.EnvVar
var reason string
+ var err error
storageType := GetStorageType(provider)
switch storageType {
@@ -158,20 +172,23 @@ func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secret
if provider.S3 == nil {
return certEnv, "S3ConfigIsEmpty", errors.New("s3 config is empty")
}
+
s3SecretName := provider.S3.SecretName
- secret, err := secretLister.Secrets(ns).Get(s3SecretName)
- if err != nil {
- err := fmt.Errorf("get s3 secret %s/%s failed, err: %v", ns, s3SecretName, err)
- return certEnv, "GetS3SecretFailed", err
- }
+ if s3SecretName != "" {
+ secret, err := kubeCli.CoreV1().Secrets(ns).Get(s3SecretName, metav1.GetOptions{})
+ if err != nil {
+ err := fmt.Errorf("get s3 secret %s/%s failed, err: %v", ns, s3SecretName, err)
+ return certEnv, "GetS3SecretFailed", err
+ }
- keyStr, exist := CheckAllKeysExistInSecret(secret, constants.S3AccessKey, constants.S3SecretKey)
- if !exist {
- err := fmt.Errorf("s3 secret %s/%s missing some keys %s", ns, s3SecretName, keyStr)
- return certEnv, "s3KeyNotExist", err
+ keyStr, exist := CheckAllKeysExistInSecret(secret, constants.S3AccessKey, constants.S3SecretKey)
+ if !exist {
+ err := fmt.Errorf("s3 secret %s/%s missing some keys %s", ns, s3SecretName, keyStr)
+ return certEnv, "s3KeyNotExist", err
+ }
}
- certEnv, reason, err = GenerateS3CertEnvVar(provider.S3.DeepCopy())
+ certEnv, reason, err = GenerateS3CertEnvVar(provider.S3.DeepCopy(), useKMS)
if err != nil {
return certEnv, reason, err
}
@@ -180,7 +197,7 @@ func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secret
return certEnv, "GcsConfigIsEmpty", errors.New("gcs config is empty")
}
gcsSecretName := provider.Gcs.SecretName
- secret, err := secretLister.Secrets(ns).Get(gcsSecretName)
+ secret, err := kubeCli.CoreV1().Secrets(ns).Get(gcsSecretName, metav1.GetOptions{})
if err != nil {
err := fmt.Errorf("get gcs secret %s/%s failed, err: %v", ns, gcsSecretName, err)
return certEnv, "GetGcsSecretFailed", err
@@ -205,9 +222,10 @@ func GenerateStorageCertEnv(ns string, provider v1alpha1.StorageProvider, secret
}
// GenerateTidbPasswordEnv generate the password EnvVar
-func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, secretLister corelisters.SecretLister) ([]corev1.EnvVar, string, error) {
+func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, useKMS bool, kubeCli kubernetes.Interface) ([]corev1.EnvVar, string, error) {
var certEnv []corev1.EnvVar
- secret, err := secretLister.Secrets(ns).Get(tidbSecretName)
+ var passwordKey string
+ secret, err := kubeCli.CoreV1().Secrets(ns).Get(tidbSecretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("backup %s/%s get tidb secret %s failed, err: %v", ns, name, tidbSecretName, err)
return certEnv, "GetTidbSecretFailed", err
@@ -219,9 +237,15 @@ func GenerateTidbPasswordEnv(ns, name, tidbSecretName string, secretLister corel
return certEnv, "KeyNotExist", err
}
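+ // When KMS is enabled, prefix the env var name with KMS_ENCRYPTED to mark the password value as KMS-encrypted.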
+ if useKMS {
+ passwordKey = fmt.Sprintf("%s_%s_%s", constants.KMSSecretPrefix, constants.BackupManagerEnvVarPrefix, strings.ToUpper(constants.TidbPasswordKey))
+ } else {
+ passwordKey = fmt.Sprintf("%s_%s", constants.BackupManagerEnvVarPrefix, strings.ToUpper(constants.TidbPasswordKey))
+ }
+
certEnv = []corev1.EnvVar{
{
- Name: fmt.Sprintf("%s_%s", constants.BackupManagerEnvVarPrefix, strings.ToUpper(constants.TidbPasswordKey)),
+ Name: passwordKey,
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{Name: tidbSecretName},
@@ -287,19 +311,20 @@ func GetBackupDataPath(provider v1alpha1.StorageProvider) (string, string, error
func ValidateBackup(backup *v1alpha1.Backup) error {
ns := backup.Namespace
name := backup.Name
+
+ if backup.Spec.From.Host == "" {
+ return fmt.Errorf("missing cluster config in spec of %s/%s", ns, name)
+ }
+ if backup.Spec.From.SecretName == "" {
+ return fmt.Errorf("missing tidbSecretName config in spec of %s/%s", ns, name)
+ }
if backup.Spec.BR == nil {
- if backup.Spec.From.Host == "" {
- return fmt.Errorf("missing cluster config in spec of %s/%s", ns, name)
- }
- if backup.Spec.From.SecretName == "" {
- return fmt.Errorf("missing tidbSecretName config in spec of %s/%s", ns, name)
- }
if backup.Spec.StorageSize == "" {
return fmt.Errorf("missing StorageSize config in spec of %s/%s", ns, name)
}
} else {
- if backup.Spec.BR.PDAddress == "" {
- return fmt.Errorf("pd address should be configured for BR in spec of %s/%s", ns, name)
+ if backup.Spec.BR.Cluster == "" {
+ return fmt.Errorf("cluster should be configured for BR in spec of %s/%s", ns, name)
}
if backup.Spec.Type != "" &&
backup.Spec.Type != v1alpha1.BackupTypeFull &&
@@ -338,19 +363,20 @@ func ValidateBackup(backup *v1alpha1.Backup) error {
func ValidateRestore(restore *v1alpha1.Restore) error {
ns := restore.Namespace
name := restore.Name
+
+ if restore.Spec.To.Host == "" {
+ return fmt.Errorf("missing cluster config in spec of %s/%s", ns, name)
+ }
+ if restore.Spec.To.SecretName == "" {
+ return fmt.Errorf("missing tidbSecretName config in spec of %s/%s", ns, name)
+ }
if restore.Spec.BR == nil {
- if restore.Spec.To.Host == "" {
- return fmt.Errorf("missing cluster config in spec of %s/%s", ns, name)
- }
- if restore.Spec.To.SecretName == "" {
- return fmt.Errorf("missing tidbSecretName config in spec of %s/%s", ns, name)
- }
if restore.Spec.StorageSize == "" {
return fmt.Errorf("missing StorageSize config in spec of %s/%s", ns, name)
}
} else {
- if restore.Spec.BR.PDAddress == "" {
- return fmt.Errorf("pd address should be configured for BR in spec of %s/%s", ns, name)
+ if restore.Spec.BR.Cluster == "" {
+ return fmt.Errorf("cluster should be configured for BR in spec of %s/%s", ns, name)
}
if restore.Spec.Type != "" &&
restore.Spec.Type != v1alpha1.BackupTypeFull &&
diff --git a/pkg/controller/autoscaler/tidbcluster_autoscaler_control.go b/pkg/controller/autoscaler/tidbcluster_autoscaler_control.go
index babeef3b45..864d6503c6 100644
--- a/pkg/controller/autoscaler/tidbcluster_autoscaler_control.go
+++ b/pkg/controller/autoscaler/tidbcluster_autoscaler_control.go
@@ -16,7 +16,6 @@ package autoscaler
import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/autoscaler"
- "github.com/pingcap/tidb-operator/pkg/controller"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
)
@@ -25,17 +24,15 @@ type ControlInterface interface {
ResconcileAutoScaler(ta *v1alpha1.TidbClusterAutoScaler) error
}
-func NewDefaultAutoScalerControl(recorder record.EventRecorder, ctrl controller.TypedControlInterface, asm autoscaler.AutoScalerManager) ControlInterface {
+func NewDefaultAutoScalerControl(recorder record.EventRecorder, asm autoscaler.AutoScalerManager) ControlInterface {
return &defaultAutoScalerControl{
recoder: recorder,
- typedControl: ctrl,
autoScalerManager: asm,
}
}
type defaultAutoScalerControl struct {
recoder record.EventRecorder
- typedControl controller.TypedControlInterface
autoScalerManager autoscaler.AutoScalerManager
}
diff --git a/pkg/controller/autoscaler/tidbcluster_autoscaler_controller.go b/pkg/controller/autoscaler/tidbcluster_autoscaler_controller.go
index a42a3ae799..5dace97792 100644
--- a/pkg/controller/autoscaler/tidbcluster_autoscaler_controller.go
+++ b/pkg/controller/autoscaler/tidbcluster_autoscaler_controller.go
@@ -20,6 +20,7 @@ import (
perrors "github.com/pingcap/errors"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/autoscaler/autoscaler"
+ "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
@@ -34,20 +35,17 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
- "sigs.k8s.io/controller-runtime/pkg/client"
)
type Controller struct {
- cli client.Client
control ControlInterface
taLister listers.TidbClusterAutoScalerLister
- tcLister listers.TidbClusterLister
queue workqueue.RateLimitingInterface
}
func NewController(
kubeCli kubernetes.Interface,
- genericCli client.Client,
+ cli versioned.Interface,
informerFactory informers.SharedInformerFactory,
kubeInformerFactory kubeinformers.SharedInformerFactory,
) *Controller {
@@ -56,14 +54,11 @@ func NewController(
eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "tidbclusterautoscaler"})
-
autoScalerInformer := informerFactory.Pingcap().V1alpha1().TidbClusterAutoScalers()
- typedControl := controller.NewTypedControl(controller.NewRealGenericControl(genericCli, recorder))
+ asm := autoscaler.NewAutoScalerManager(cli, informerFactory, kubeInformerFactory, recorder)
- asm := autoscaler.NewAutoScalerManager(informerFactory, kubeInformerFactory, recorder)
tac := &Controller{
- cli: genericCli,
- control: NewDefaultAutoScalerControl(recorder, typedControl, asm),
+ control: NewDefaultAutoScalerControl(recorder, asm),
taLister: autoScalerInformer.Lister(),
queue: workqueue.NewNamedRateLimitingQueue(
workqueue.DefaultControllerRateLimiter(),
diff --git a/pkg/controller/backup/backup_control_test.go b/pkg/controller/backup/backup_control_test.go
index c3a36de68a..d0b30bd4e4 100644
--- a/pkg/controller/backup/backup_control_test.go
+++ b/pkg/controller/backup/backup_control_test.go
@@ -104,7 +104,7 @@ func TestBackupControlUpdateBackup(t *testing.T) {
},
},
{
- name: "backup manager update failed",
+ name: "backup manager sync failed",
update: func(backup *v1alpha1.Backup) {
backup.Finalizers = append(backup.Finalizers, label.BackupProtectionFinalizer)
},
diff --git a/pkg/controller/backup/backup_controller.go b/pkg/controller/backup/backup_controller.go
index dd2ec48e1a..e660649581 100644
--- a/pkg/controller/backup/backup_controller.go
+++ b/pkg/controller/backup/backup_controller.go
@@ -34,7 +34,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// Controller controls backup.
@@ -62,19 +62,19 @@ func NewController(
kubeInformerFactory kubeinformers.SharedInformerFactory,
) *Controller {
eventBroadcaster := record.NewBroadcaster()
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "backup"})
backupInformer := informerFactory.Pingcap().V1alpha1().Backups()
+ tcInformer := informerFactory.Pingcap().V1alpha1().TidbClusters()
jobInformer := kubeInformerFactory.Batch().V1().Jobs()
pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims()
- secretInformer := kubeInformerFactory.Core().V1().Secrets()
statusUpdater := controller.NewRealBackupConditionUpdater(cli, backupInformer.Lister(), recorder)
jobControl := controller.NewRealJobControl(kubeCli, recorder)
pvcControl := controller.NewRealGeneralPVCControl(kubeCli, recorder)
- backupCleaner := backup.NewBackupCleaner(statusUpdater, secretInformer.Lister(), jobInformer.Lister(), jobControl)
+ backupCleaner := backup.NewBackupCleaner(statusUpdater, kubeCli, jobInformer.Lister(), jobControl)
bkc := &Controller{
kubeClient: kubeCli,
@@ -84,10 +84,11 @@ func NewController(
backup.NewBackupManager(
backupCleaner,
statusUpdater,
- secretInformer.Lister(),
+ kubeCli,
jobInformer.Lister(),
jobControl,
pvcInformer.Lister(),
+ tcInformer.Lister(),
pvcControl,
),
),
@@ -115,8 +116,8 @@ func (bkc *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer bkc.queue.ShutDown()
- glog.Info("Starting backup controller")
- defer glog.Info("Shutting down backup controller")
+ klog.Info("Starting backup controller")
+ defer klog.Info("Shutting down backup controller")
for i := 0; i < workers; i++ {
go wait.Until(bkc.worker, time.Second, stopCh)
@@ -142,10 +143,10 @@ func (bkc *Controller) processNextWorkItem() bool {
defer bkc.queue.Done(key)
if err := bkc.sync(key.(string)); err != nil {
if perrors.Find(err, controller.IsRequeueError) != nil {
- glog.Infof("Backup: %v, still need sync: %v, requeuing", key.(string), err)
+ klog.Infof("Backup: %v, still need sync: %v, requeuing", key.(string), err)
bkc.queue.AddRateLimited(key)
} else if perrors.Find(err, controller.IsIgnoreError) != nil {
- glog.V(4).Infof("Backup: %v, ignore err: %v", key.(string), err)
+ klog.V(4).Infof("Backup: %v, ignore err: %v", key.(string), err)
} else {
utilruntime.HandleError(fmt.Errorf("Backup: %v, sync failed, err: %v, requeuing", key.(string), err))
bkc.queue.AddRateLimited(key)
@@ -160,7 +161,7 @@ func (bkc *Controller) processNextWorkItem() bool {
func (bkc *Controller) sync(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing Backup %q (%v)", key, time.Since(startTime))
+ klog.V(4).Infof("Finished syncing Backup %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
@@ -169,7 +170,7 @@ func (bkc *Controller) sync(key string) error {
}
backup, err := bkc.backupLister.Backups(ns).Get(name)
if errors.IsNotFound(err) {
- glog.Infof("Backup has been deleted %v", key)
+ klog.Infof("Backup has been deleted %v", key)
return nil
}
if err != nil {
@@ -190,27 +191,27 @@ func (bkc *Controller) updateBackup(cur interface{}) {
if newBackup.DeletionTimestamp != nil {
// the backup is being deleted, we need to do some cleanup work, enqueue backup.
- glog.Infof("backup %s/%s is being deleted", ns, name)
+ klog.Infof("backup %s/%s is being deleted", ns, name)
bkc.enqueueBackup(newBackup)
return
}
if v1alpha1.IsBackupInvalid(newBackup) {
- glog.V(4).Infof("backup %s/%s is invalid, skipping.", ns, name)
+ klog.V(4).Infof("backup %s/%s is invalid, skipping.", ns, name)
return
}
if v1alpha1.IsBackupComplete(newBackup) {
- glog.V(4).Infof("backup %s/%s is Complete, skipping.", ns, name)
+ klog.V(4).Infof("backup %s/%s is Complete, skipping.", ns, name)
return
}
if v1alpha1.IsBackupScheduled(newBackup) {
- glog.V(4).Infof("backup %s/%s is already scheduled, skipping", ns, name)
+ klog.V(4).Infof("backup %s/%s is already scheduled, skipping", ns, name)
return
}
- glog.V(4).Infof("backup object %s/%s enqueue", ns, name)
+ klog.V(4).Infof("backup object %s/%s enqueue", ns, name)
bkc.enqueueBackup(newBackup)
}
diff --git a/pkg/controller/backup/backup_controller_test.go b/pkg/controller/backup/backup_controller_test.go
index 692002dddb..ec1876393e 100644
--- a/pkg/controller/backup/backup_controller_test.go
+++ b/pkg/controller/backup/backup_controller_test.go
@@ -53,6 +53,7 @@ func TestBackupControllerUpdateBackup(t *testing.T) {
type testcase struct {
name string
backupHasBeenDeleted bool
+ backupIsInvalid bool
backupHasBeenCompleted bool
backupHasBeenScheduled bool
expectFn func(*GomegaWithT, *Controller)
@@ -68,6 +69,15 @@ func TestBackupControllerUpdateBackup(t *testing.T) {
backup.DeletionTimestamp = &metav1.Time{Time: time.Now()}
}
+ if test.backupIsInvalid {
+ backup.Status.Conditions = []v1alpha1.BackupCondition{
+ {
+ Type: v1alpha1.BackupInvalid,
+ Status: corev1.ConditionTrue,
+ },
+ }
+ }
+
if test.backupHasBeenCompleted {
backup.Status.Conditions = []v1alpha1.BackupCondition{
{
@@ -96,15 +106,27 @@ func TestBackupControllerUpdateBackup(t *testing.T) {
{
name: "backup has been deleted",
backupHasBeenDeleted: true,
+ backupIsInvalid: false,
backupHasBeenCompleted: false,
backupHasBeenScheduled: false,
expectFn: func(g *GomegaWithT, bkc *Controller) {
g.Expect(bkc.queue.Len()).To(Equal(1))
},
},
+ {
+ name: "backup is invalid",
+ backupHasBeenDeleted: false,
+ backupIsInvalid: true,
+ backupHasBeenCompleted: false,
+ backupHasBeenScheduled: false,
+ expectFn: func(g *GomegaWithT, bkc *Controller) {
+ g.Expect(bkc.queue.Len()).To(Equal(0))
+ },
+ },
{
name: "backup has been completed",
backupHasBeenDeleted: false,
+ backupIsInvalid: false,
backupHasBeenCompleted: true,
backupHasBeenScheduled: false,
expectFn: func(g *GomegaWithT, bkc *Controller) {
@@ -114,6 +136,7 @@ func TestBackupControllerUpdateBackup(t *testing.T) {
{
name: "backup has been scheduled",
backupHasBeenDeleted: false,
+ backupIsInvalid: false,
backupHasBeenCompleted: false,
backupHasBeenScheduled: true,
expectFn: func(g *GomegaWithT, bkc *Controller) {
@@ -123,6 +146,7 @@ func TestBackupControllerUpdateBackup(t *testing.T) {
{
name: "backup is newly created",
backupHasBeenDeleted: false,
+ backupIsInvalid: false,
backupHasBeenCompleted: false,
backupHasBeenScheduled: false,
expectFn: func(g *GomegaWithT, bkc *Controller) {
diff --git a/pkg/controller/backup_control.go b/pkg/controller/backup_control.go
index 7173213ffa..4052b2ab41 100644
--- a/pkg/controller/backup_control.go
+++ b/pkg/controller/backup_control.go
@@ -38,7 +38,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// BackupControlInterface manages Backups used in BackupSchedule
@@ -70,9 +70,9 @@ func (rbc *realBackupControl) CreateBackup(backup *v1alpha1.Backup) (*v1alpha1.B
bsName := backup.GetLabels()[label.BackupScheduleLabelKey]
backup, err := rbc.cli.PingcapV1alpha1().Backups(ns).Create(backup)
if err != nil {
- glog.Errorf("failed to create Backup: [%s/%s] for backupSchedule/%s, err: %v", ns, backupName, bsName, err)
+ klog.Errorf("failed to create Backup: [%s/%s] for backupSchedule/%s, err: %v", ns, backupName, bsName, err)
} else {
- glog.V(4).Infof("create Backup: [%s/%s] for backupSchedule/%s successfully", ns, backupName, bsName)
+ klog.V(4).Infof("create Backup: [%s/%s] for backupSchedule/%s successfully", ns, backupName, bsName)
}
rbc.recordBackupEvent("create", backup, err)
return backup, err
@@ -85,9 +85,9 @@ func (rbc *realBackupControl) DeleteBackup(backup *v1alpha1.Backup) error {
bsName := backup.GetLabels()[label.BackupScheduleLabelKey]
err := rbc.cli.PingcapV1alpha1().Backups(ns).Delete(backupName, nil)
if err != nil {
- glog.Errorf("failed to delete Backup: [%s/%s] for backupSchedule/%s, err: %v", ns, backupName, bsName, err)
+ klog.Errorf("failed to delete Backup: [%s/%s] for backupSchedule/%s, err: %v", ns, backupName, bsName, err)
} else {
- glog.V(4).Infof("delete backup: [%s/%s] successfully, backupSchedule/%s", ns, backupName, bsName)
+ klog.V(4).Infof("delete backup: [%s/%s] successfully, backupSchedule/%s", ns, backupName, bsName)
}
rbc.recordBackupEvent("delete", backup, err)
return err
diff --git a/pkg/controller/backup_schedule_status_updater.go b/pkg/controller/backup_schedule_status_updater.go
index 9e633f9306..14cfdc0a83 100644
--- a/pkg/controller/backup_schedule_status_updater.go
+++ b/pkg/controller/backup_schedule_status_updater.go
@@ -17,7 +17,7 @@ import (
"fmt"
"strings"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
@@ -68,7 +68,7 @@ func (bss *realBackupScheduleStatusUpdater) UpdateBackupScheduleStatus(
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
_, updateErr := bss.cli.PingcapV1alpha1().BackupSchedules(ns).Update(bs)
if updateErr == nil {
- glog.Infof("BackupSchedule: [%s/%s] updated successfully", ns, bsName)
+ klog.Infof("BackupSchedule: [%s/%s] updated successfully", ns, bsName)
return nil
}
if updated, err := bss.bsLister.BackupSchedules(ns).Get(bsName); err == nil {
diff --git a/pkg/controller/backup_status_updater.go b/pkg/controller/backup_status_updater.go
index 9a6490d24d..c35d34a1b8 100644
--- a/pkg/controller/backup_status_updater.go
+++ b/pkg/controller/backup_status_updater.go
@@ -17,7 +17,7 @@ import (
"fmt"
"strings"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
@@ -63,7 +63,7 @@ func (bcu *realBackupConditionUpdater) Update(backup *v1alpha1.Backup, condition
if isUpdate {
_, updateErr := bcu.cli.PingcapV1alpha1().Backups(ns).Update(backup)
if updateErr == nil {
- glog.Infof("Backup: [%s/%s] updated successfully", ns, backupName)
+ klog.Infof("Backup: [%s/%s] updated successfully", ns, backupName)
return nil
}
if updated, err := bcu.backupLister.Backups(ns).Get(backupName); err == nil {
diff --git a/pkg/controller/backupschedule/backup_schedule_controller.go b/pkg/controller/backupschedule/backup_schedule_controller.go
index fd6c4177cd..ce0f2d84d7 100644
--- a/pkg/controller/backupschedule/backup_schedule_controller.go
+++ b/pkg/controller/backupschedule/backup_schedule_controller.go
@@ -34,7 +34,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// Controller controls restore.
@@ -62,7 +62,7 @@ func NewController(
kubeInformerFactory kubeinformers.SharedInformerFactory,
) *Controller {
eventBroadcaster := record.NewBroadcaster()
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "backupSchedule"})
@@ -111,8 +111,8 @@ func (bsc *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer bsc.queue.ShutDown()
- glog.Info("Starting backup schedule controller")
- defer glog.Info("Shutting down backup schedule controller")
+ klog.Info("Starting backup schedule controller")
+ defer klog.Info("Shutting down backup schedule controller")
for i := 0; i < workers; i++ {
go wait.Until(bsc.worker, time.Second, stopCh)
@@ -138,10 +138,10 @@ func (bsc *Controller) processNextWorkItem() bool {
defer bsc.queue.Done(key)
if err := bsc.sync(key.(string)); err != nil {
if perrors.Find(err, controller.IsRequeueError) != nil {
- glog.Infof("BackupSchedule: %v, still need sync: %v, requeuing", key.(string), err)
+ klog.Infof("BackupSchedule: %v, still need sync: %v, requeuing", key.(string), err)
bsc.queue.AddRateLimited(key)
} else if perrors.Find(err, controller.IsIgnoreError) != nil {
- glog.V(4).Infof("BackupSchedule: %v, ignore err: %v, waiting for the next sync", key.(string), err)
+ klog.V(4).Infof("BackupSchedule: %v, ignore err: %v, waiting for the next sync", key.(string), err)
} else {
utilruntime.HandleError(fmt.Errorf("BackupSchedule: %v, sync failed, err: %v, requeuing", key.(string), err))
bsc.queue.AddRateLimited(key)
@@ -156,7 +156,7 @@ func (bsc *Controller) processNextWorkItem() bool {
func (bsc *Controller) sync(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing BackupSchedule %q (%v)", key, time.Since(startTime))
+ klog.V(4).Infof("Finished syncing BackupSchedule %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
@@ -165,7 +165,7 @@ func (bsc *Controller) sync(key string) error {
}
bs, err := bsc.bsLister.BackupSchedules(ns).Get(name)
if errors.IsNotFound(err) {
- glog.Infof("BackupSchedule has been deleted %v", key)
+ klog.Infof("BackupSchedule has been deleted %v", key)
return nil
}
if err != nil {
diff --git a/pkg/controller/cert_control.go b/pkg/controller/cert_control.go
deleted file mode 100644
index 86c25f17d3..0000000000
--- a/pkg/controller/cert_control.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2019 PingCAP, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package controller
-
-import (
- "encoding/pem"
- "fmt"
- "time"
-
- "github.com/pingcap/tidb-operator/pkg/label"
- certutil "github.com/pingcap/tidb-operator/pkg/util/crypto"
- capi "k8s.io/api/certificates/v1beta1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/client-go/kubernetes"
- certlisters "k8s.io/client-go/listers/certificates/v1beta1"
- glog "k8s.io/klog"
-)
-
-// TiDBClusterCertOptions contains information needed to create new certificates
-type TiDBClusterCertOptions struct {
- Namespace string
- Instance string
- CommonName string
- HostList []string
- IPList []string
- Suffix string
- Component string
-}
-
-// CertControlInterface manages certificates used by TiDB clusters
-type CertControlInterface interface {
- Create(or metav1.OwnerReference, certOpts *TiDBClusterCertOptions) error
- CheckSecret(ns string, secretName string) bool
- //RevokeCert() error
- //RenewCert() error
-}
-
-type realCertControl struct {
- kubeCli kubernetes.Interface
- csrLister certlisters.CertificateSigningRequestLister
- secControl SecretControlInterface
-}
-
-// NewRealCertControl creates a new CertControlInterface
-func NewRealCertControl(
- kubeCli kubernetes.Interface,
- csrLister certlisters.CertificateSigningRequestLister,
- secControl SecretControlInterface,
-) CertControlInterface {
- return &realCertControl{
- kubeCli: kubeCli,
- csrLister: csrLister,
- secControl: secControl,
- }
-}
-
-func (rcc *realCertControl) Create(or metav1.OwnerReference, certOpts *TiDBClusterCertOptions) error {
- var csrName string
- if certOpts.Suffix == "" {
- csrName = certOpts.Instance
- } else {
- csrName = fmt.Sprintf("%s-%s", certOpts.Instance, certOpts.Suffix)
- }
-
- // generate certificate if not exist
- if rcc.secControl.Check(certOpts.Namespace, csrName) {
- glog.Infof("Secret %s already exist, reusing the key pair. TidbCluster: %s/%s", csrName, certOpts.Namespace, csrName)
- return nil
- }
-
- rawCSR, key, err := certutil.NewCSR(certOpts.CommonName, certOpts.HostList, certOpts.IPList)
- if err != nil {
- return fmt.Errorf("fail to generate new key and certificate for %s/%s, %v", certOpts.Namespace, csrName, err)
- }
-
- // sign certificate
- csr, err := rcc.sendCSR(or, certOpts.Namespace, certOpts.Instance, rawCSR, csrName)
- if err != nil {
- return err
- }
- err = rcc.approveCSR(csr)
- if err != nil {
- return err
- }
-
- // wait at most 5min for the cert to be signed
- timeout := int64(time.Minute.Seconds() * 5)
- tick := time.After(time.Second * 10)
- watchReq := types.ListOptions{
- Watch: true,
- TimeoutSeconds: &timeout,
- FieldSelector: fields.OneTermEqualSelector("metadata.name", csrName).String(),
- }
-
- csrCh, err := rcc.kubeCli.CertificatesV1beta1().CertificateSigningRequests().Watch(watchReq)
- if err != nil {
- glog.Errorf("error watch CSR for [%s/%s]: %s", certOpts.Namespace, certOpts.Instance, csrName)
- return err
- }
-
- watchCh := csrCh.ResultChan()
- for {
- select {
- case <-tick:
- glog.Infof("CSR still not approved for [%s/%s]: %s, retry later", certOpts.Namespace, certOpts.Instance, csrName)
- continue
- case event, ok := <-watchCh:
- if !ok {
- return fmt.Errorf("fail to get signed certificate for %s", csrName)
- }
-
- if len(event.Object.(*capi.CertificateSigningRequest).Status.Conditions) == 0 {
- continue
- }
-
- updatedCSR := event.Object.(*capi.CertificateSigningRequest)
- approveCond := updatedCSR.Status.Conditions[len(csr.Status.Conditions)-1].Type
-
- if updatedCSR.UID == csr.UID &&
- approveCond == capi.CertificateApproved &&
- updatedCSR.Status.Certificate != nil {
- glog.Infof("signed certificate for [%s/%s]: %s", certOpts.Namespace, certOpts.Instance, csrName)
-
- // save signed certificate and key to secret
- err = rcc.secControl.Create(or, certOpts, updatedCSR.Status.Certificate, key)
- if err == nil {
- // cleanup the approved csr
- delOpts := &types.DeleteOptions{TypeMeta: types.TypeMeta{Kind: "CertificateSigningRequest"}}
- return rcc.kubeCli.CertificatesV1beta1().CertificateSigningRequests().Delete(csrName, delOpts)
- }
- return err
- }
- continue
- }
- }
-}
-
-func (rcc *realCertControl) getCSR(ns string, instance string, csrName string) (*capi.CertificateSigningRequest, error) {
- csr, err := rcc.csrLister.Get(csrName)
- if err != nil && apierrors.IsNotFound(err) {
- // it's supposed to be not found
- return nil, nil
- }
- if err != nil {
- // something else went wrong
- return nil, err
- }
-
- labelTemp := label.New()
- if csr.Labels[label.NamespaceLabelKey] == ns &&
- csr.Labels[label.ManagedByLabelKey] == labelTemp[label.ManagedByLabelKey] &&
- csr.Labels[label.InstanceLabelKey] == instance {
- return csr, nil
- }
- return nil, fmt.Errorf("CSR %s/%s already exist, but not created by tidb-operator, skip it", ns, csrName)
-}
-
-func (rcc *realCertControl) sendCSR(or metav1.OwnerReference, ns, instance string, rawCSR []byte, csrName string) (*capi.CertificateSigningRequest, error) {
- var csr *capi.CertificateSigningRequest
-
- // check for exist CSR, overwrite if it was created by operator, otherwise block the process
- csr, err := rcc.getCSR(ns, instance, csrName)
- if err != nil {
- return nil, fmt.Errorf("failed to create CSR for [%s/%s]: %s, error: %v", ns, instance, csrName, err)
- }
-
- if csr != nil {
- glog.Infof("found exist CSR %s/%s created by tidb-operator, overwriting", ns, csrName)
- delOpts := &types.DeleteOptions{TypeMeta: types.TypeMeta{Kind: "CertificateSigningRequest"}}
- err := rcc.kubeCli.CertificatesV1beta1().CertificateSigningRequests().Delete(csrName, delOpts)
- if err != nil {
- return nil, fmt.Errorf("failed to delete exist old CSR for [%s/%s]: %s, error: %v", ns, instance, csrName, err)
- }
- glog.Infof("exist old CSR deleted for [%s/%s]: %s", ns, instance, csrName)
- return rcc.sendCSR(or, ns, instance, rawCSR, csrName)
- }
-
- csrLabels := label.New().Instance(instance).Labels()
- csr = &capi.CertificateSigningRequest{
- TypeMeta: types.TypeMeta{Kind: "CertificateSigningRequest"},
- ObjectMeta: types.ObjectMeta{
- Name: csrName,
- Labels: csrLabels,
- OwnerReferences: []metav1.OwnerReference{or},
- },
- Spec: capi.CertificateSigningRequestSpec{
- Request: pem.EncodeToMemory(&pem.Block{
- Type: "CERTIFICATE REQUEST",
- Headers: nil,
- Bytes: rawCSR,
- }),
- Usages: []capi.KeyUsage{
- capi.UsageClientAuth,
- capi.UsageServerAuth,
- },
- },
- }
-
- resp, err := rcc.kubeCli.CertificatesV1beta1().CertificateSigningRequests().Create(csr)
- if err != nil {
- return resp, fmt.Errorf("failed to create CSR for [%s/%s]: %s, error: %v", ns, instance, csrName, err)
- }
- glog.Infof("CSR created for [%s/%s]: %s", ns, instance, csrName)
- return resp, nil
-}
-
-func (rcc *realCertControl) approveCSR(csr *capi.CertificateSigningRequest) error {
- csr.Status.Conditions = append(csr.Status.Conditions, capi.CertificateSigningRequestCondition{
- Type: capi.CertificateApproved,
- Reason: "AutoApproved",
- Message: "Auto approved by TiDB Operator",
- })
-
- _, err := rcc.kubeCli.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(csr)
- if err != nil {
- return fmt.Errorf("error updating approval for csr: %v", err)
- }
- return nil
-}
-
-/*
-func (rcc *realCertControl) RevokeCert() error {
- return nil
-}
-*/
-/*
-func (rcc *realCertControl) RenewCert() error {
- return nil
-}
-*/
-
-func (rcc *realCertControl) CheckSecret(ns string, secretName string) bool {
- return rcc.secControl.Check(ns, secretName)
-}
-
-var _ CertControlInterface = &realCertControl{}
-
-type FakeCertControl struct {
- realCertControl
-}
-
-func NewFakeCertControl(
- kubeCli kubernetes.Interface,
- csrLister certlisters.CertificateSigningRequestLister,
- secControl SecretControlInterface,
-) CertControlInterface {
- return &realCertControl{
- kubeCli: kubeCli,
- csrLister: csrLister,
- secControl: secControl,
- }
-}
diff --git a/pkg/controller/configmap_control.go b/pkg/controller/configmap_control.go
index 6e05755b58..137e227b1a 100644
--- a/pkg/controller/configmap_control.go
+++ b/pkg/controller/configmap_control.go
@@ -23,7 +23,6 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
- corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
@@ -44,19 +43,16 @@ type ConfigMapControlInterface interface {
type realConfigMapControl struct {
client client.Client
kubeCli kubernetes.Interface
- cmLister corelisters.ConfigMapLister
recorder record.EventRecorder
}
// NewRealSecretControl creates a new SecretControlInterface
func NewRealConfigMapControl(
kubeCli kubernetes.Interface,
- cmLister corelisters.ConfigMapLister,
recorder record.EventRecorder,
) ConfigMapControlInterface {
return &realConfigMapControl{
kubeCli: kubeCli,
- cmLister: cmLister,
recorder: recorder,
}
}
@@ -81,7 +77,7 @@ func (cc *realConfigMapControl) UpdateConfigMap(owner runtime.Object, cm *corev1
return nil
}
- if updated, err := cc.cmLister.ConfigMaps(cm.Namespace).Get(cmName); err != nil {
+ if updated, err := cc.kubeCli.CoreV1().ConfigMaps(cm.Namespace).Get(cmName, metav1.GetOptions{}); err != nil {
utilruntime.HandleError(fmt.Errorf("error getting updated ConfigMap %s/%s from lister: %v", ns, cmName, err))
} else {
cm = updated.DeepCopy()
@@ -124,7 +120,6 @@ var _ ConfigMapControlInterface = &realConfigMapControl{}
// NewFakeConfigMapControl returns a FakeConfigMapControl
func NewFakeConfigMapControl(cmInformer coreinformers.ConfigMapInformer) *FakeConfigMapControl {
return &FakeConfigMapControl{
- cmInformer.Lister(),
cmInformer.Informer().GetIndexer(),
RequestTracker{},
RequestTracker{},
@@ -134,7 +129,6 @@ func NewFakeConfigMapControl(cmInformer coreinformers.ConfigMapInformer) *FakeCo
// FakeConfigMapControl is a fake ConfigMapControlInterface
type FakeConfigMapControl struct {
- CmLister corelisters.ConfigMapLister
CmIndexer cache.Indexer
createConfigMapTracker RequestTracker
updateConfigMapTracker RequestTracker
diff --git a/pkg/controller/configmap_control_test.go b/pkg/controller/configmap_control_test.go
index ffdfb8ad88..47fb0ee7b2 100644
--- a/pkg/controller/configmap_control_test.go
+++ b/pkg/controller/configmap_control_test.go
@@ -23,9 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
- corelisters "k8s.io/client-go/listers/core/v1"
core "k8s.io/client-go/testing"
- "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
)
@@ -35,7 +33,7 @@ func TestConfigMapControlCreatesConfigMaps(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
- control := NewRealConfigMapControl(fakeClient, nil, recorder)
+ control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
create := action.(core.CreateAction)
return true, create.GetObject(), nil
@@ -54,7 +52,7 @@ func TestConfigMapControlCreatesConfigMapFailed(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
- control := NewRealConfigMapControl(fakeClient, nil, recorder)
+ control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
@@ -73,7 +71,7 @@ func TestConfigMapControlUpdateConfigMap(t *testing.T) {
cm := newConfigMap()
cm.Data["file"] = "test"
fakeClient := &fake.Clientset{}
- control := NewRealConfigMapControl(fakeClient, nil, recorder)
+ control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
return true, update.GetObject(), nil
@@ -90,13 +88,9 @@ func TestConfigMapControlUpdateConfigMapConflictSuccess(t *testing.T) {
cm := newConfigMap()
cm.Data["file"] = "test"
fakeClient := &fake.Clientset{}
- indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
oldcm := newConfigMap()
oldcm.Data["file"] = "test2"
- err := indexer.Add(oldcm)
- g.Expect(err).To(Succeed())
- cmLister := corelisters.NewConfigMapLister(indexer)
- control := NewRealConfigMapControl(fakeClient, cmLister, recorder)
+ control := NewRealConfigMapControl(fakeClient, recorder)
conflict := false
fakeClient.AddReactor("update", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
update := action.(core.UpdateAction)
@@ -117,7 +111,7 @@ func TestConfigMapControlDeleteConfigMap(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
- control := NewRealConfigMapControl(fakeClient, nil, recorder)
+ control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, nil
})
@@ -134,7 +128,7 @@ func TestConfigMapControlDeleteConfigMapFailed(t *testing.T) {
tc := newTidbCluster()
cm := newConfigMap()
fakeClient := &fake.Clientset{}
- control := NewRealConfigMapControl(fakeClient, nil, recorder)
+ control := NewRealConfigMapControl(fakeClient, recorder)
fakeClient.AddReactor("delete", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewInternalError(errors.New("API server down"))
})
diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go
index 2f6ef3de40..6ee5f55e69 100644
--- a/pkg/controller/controller_utils.go
+++ b/pkg/controller/controller_utils.go
@@ -32,7 +32,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
"k8s.io/client-go/util/workqueue"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -218,7 +218,7 @@ func TiKVCapacity(limits corev1.ResourceList) string {
}
i, b := q.AsInt64()
if !b {
- glog.Errorf("quantity %s can't be converted to int64", q.String())
+ klog.Errorf("quantity %s can't be converted to int64", q.String())
return defaultArgs
}
if i%humanize.GiByte == 0 {
@@ -247,6 +247,16 @@ func TiKVPeerMemberName(clusterName string) string {
return fmt.Sprintf("%s-tikv-peer", clusterName)
}
+// TiFlashMemberName returns tiflash member name
+func TiFlashMemberName(clusterName string) string {
+ return fmt.Sprintf("%s-tiflash", clusterName)
+}
+
+// TiFlashPeerMemberName returns tiflash peer service name
+func TiFlashPeerMemberName(clusterName string) string {
+ return fmt.Sprintf("%s-tiflash-peer", clusterName)
+}
+
// TiDBMemberName returns tidb member name
func TiDBMemberName(clusterName string) string {
return fmt.Sprintf("%s-tidb", clusterName)
@@ -436,7 +446,7 @@ func WatchForController(informer cache.SharedIndexInformer, q workqueue.Interfac
controllerObj, err := fn(meta.GetNamespace(), ref.Name)
if err != nil {
if errors.IsNotFound(err) {
- glog.V(4).Infof("controller %s/%s of %s/%s not found, ignore",
+ klog.V(4).Infof("controller %s/%s of %s/%s not found, ignore",
meta.GetNamespace(), ref.Name, meta.GetNamespace(), meta.GetName())
} else {
utilruntime.HandleError(fmt.Errorf("cannot get controller %s/%s of %s/%s",
diff --git a/pkg/controller/general_pvc_control.go b/pkg/controller/general_pvc_control.go
index b3bb84925e..b76b0cf5b0 100644
--- a/pkg/controller/general_pvc_control.go
+++ b/pkg/controller/general_pvc_control.go
@@ -25,7 +25,7 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// GeneralPVCControlInterface manages PVCs used in backup and restore's pvc
@@ -57,9 +57,9 @@ func (gpc *realGeneralPVCControl) CreatePVC(object runtime.Object, pvc *corev1.P
_, err := gpc.kubeCli.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
if err != nil {
- glog.Errorf("failed to create pvc: [%s/%s], %s: %s, %v", ns, pvcName, kind, instanceName, err)
+ klog.Errorf("failed to create pvc: [%s/%s], %s: %s, %v", ns, pvcName, kind, instanceName, err)
} else {
- glog.V(4).Infof("create pvc: [%s/%s] successfully, %s: %s", ns, pvcName, kind, instanceName)
+ klog.V(4).Infof("create pvc: [%s/%s] successfully, %s: %s", ns, pvcName, kind, instanceName)
}
gpc.recordPVCEvent("create", object, pvc, err)
return err
diff --git a/pkg/controller/generic_control.go b/pkg/controller/generic_control.go
index 01820e9eff..efaa6648de 100644
--- a/pkg/controller/generic_control.go
+++ b/pkg/controller/generic_control.go
@@ -271,8 +271,27 @@ func (w *typedWrapper) CreateOrUpdateService(controller runtime.Object, svc *cor
}
existingSvc.Annotations[LastAppliedConfigAnnotation] = string(b)
clusterIp := existingSvc.Spec.ClusterIP
+ ports := existingSvc.Spec.Ports
+ serviceType := existingSvc.Spec.Type
+
existingSvc.Spec = desiredSvc.Spec
existingSvc.Spec.ClusterIP = clusterIp
+
+ // If both the existing and the desired service are of type NodePort or LoadBalancer, keep the existing nodePort unchanged.
+ if (serviceType == corev1.ServiceTypeNodePort || serviceType == corev1.ServiceTypeLoadBalancer) &&
+ (desiredSvc.Spec.Type == corev1.ServiceTypeNodePort || desiredSvc.Spec.Type == corev1.ServiceTypeLoadBalancer) {
+ for i, dport := range existingSvc.Spec.Ports {
+ for _, eport := range ports {
+ // Because the port name may have been edited,
+ // match the desired and existing service ports by port number and protocol in this nested loop.
+ if dport.Port == eport.Port && dport.Protocol == eport.Protocol {
+ dport.NodePort = eport.NodePort
+ existingSvc.Spec.Ports[i] = dport
+ break
+ }
+ }
+ }
+ }
}
return nil
})
diff --git a/pkg/controller/job_control.go b/pkg/controller/job_control.go
index db38ba55fa..b978f0f5ce 100644
--- a/pkg/controller/job_control.go
+++ b/pkg/controller/job_control.go
@@ -27,7 +27,7 @@ import (
batchlisters "k8s.io/client-go/listers/batch/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// JobControlInterface manages Jobs used in backup、restore and clean
@@ -60,9 +60,9 @@ func (rjc *realJobControl) CreateJob(object runtime.Object, job *batchv1.Job) er
_, err := rjc.kubeCli.BatchV1().Jobs(ns).Create(job)
if err != nil {
- glog.Errorf("failed to create %s job: [%s/%s], cluster: %s, err: %v", strings.ToLower(kind), ns, jobName, instanceName, err)
+ klog.Errorf("failed to create %s job: [%s/%s], cluster: %s, err: %v", strings.ToLower(kind), ns, jobName, instanceName, err)
} else {
- glog.V(4).Infof("create %s job: [%s/%s] successfully, cluster: %s", strings.ToLower(kind), ns, jobName, instanceName)
+ klog.V(4).Infof("create %s job: [%s/%s] successfully, cluster: %s", strings.ToLower(kind), ns, jobName, instanceName)
}
rjc.recordJobEvent("create", object, job, err)
return err
@@ -80,9 +80,9 @@ func (rjc *realJobControl) DeleteJob(object runtime.Object, job *batchv1.Job) er
}
err := rjc.kubeCli.BatchV1().Jobs(ns).Delete(jobName, opts)
if err != nil {
- glog.Errorf("failed to delete %s job: [%s/%s], cluster: %s, err: %v", strings.ToLower(kind), ns, jobName, instanceName, err)
+ klog.Errorf("failed to delete %s job: [%s/%s], cluster: %s, err: %v", strings.ToLower(kind), ns, jobName, instanceName, err)
} else {
- glog.V(4).Infof("delete %s job: [%s/%s] successfully, cluster: %s", strings.ToLower(kind), ns, jobName, instanceName)
+ klog.V(4).Infof("delete %s job: [%s/%s] successfully, cluster: %s", strings.ToLower(kind), ns, jobName, instanceName)
}
rjc.recordJobEvent("delete", object, job, err)
return err
diff --git a/pkg/controller/periodicity/periodicity_controller.go b/pkg/controller/periodicity/periodicity_controller.go
new file mode 100644
index 0000000000..2d3a8079a2
--- /dev/null
+++ b/pkg/controller/periodicity/periodicity_controller.go
@@ -0,0 +1,122 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package periodicity implements the periodicity controller,
+// which periodically updates the StatefulSets managed by the operator.
+// This is necessary when the pod admission webhook is used: the webhook
+// denies pod deletion requests while a pod is not ready for deletion, and
+// the StatefulSet controller's retry backoff grows exponentially on such
+// failures. Updating the StatefulSets triggers events so that they keep
+// being re-queued by the StatefulSet controller.
+// Refer to https://github.com/pingcap/tidb-operator/pull/1875 and
+// https://github.com/pingcap/tidb-operator/issues/1846 for more details.
+package periodicity
+
+import (
+ "time"
+
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
+ v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/controller"
+ "github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/wait"
+ kubeinformers "k8s.io/client-go/informers"
+ "k8s.io/client-go/kubernetes"
+ eventv1 "k8s.io/client-go/kubernetes/typed/core/v1"
+ appslisters "k8s.io/client-go/listers/apps/v1"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog"
+)
+
+type Controller struct {
+ stsLister appslisters.StatefulSetLister
+ tcLister v1alpha1listers.TidbClusterLister
+ statefulSetControl controller.StatefulSetControlInterface
+}
+
+func NewController(
+ kubeCli kubernetes.Interface,
+ informerFactory informers.SharedInformerFactory,
+ kubeInformerFactory kubeinformers.SharedInformerFactory) *Controller {
+
+ eventBroadcaster := record.NewBroadcaster()
+ eventBroadcaster.StartLogging(klog.Infof)
+ eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
+ Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
+ recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "periodicity-controller"})
+ stsLister := kubeInformerFactory.Apps().V1().StatefulSets().Lister()
+
+ return &Controller{
+ tcLister: informerFactory.Pingcap().V1alpha1().TidbClusters().Lister(),
+ statefulSetControl: controller.NewRealStatefuSetControl(kubeCli, stsLister, recorder),
+ stsLister: stsLister,
+ }
+
+}
+
+func (c *Controller) Run(stopCh <-chan struct{}) {
+ klog.Infof("Starting periodicity controller")
+ defer klog.Infof("Shutting down periodicity controller")
+ wait.Until(c.run, time.Minute, stopCh)
+}
+
+func (c *Controller) run() {
+ var errs []error
+ if err := c.syncStatefulSetTimeStamp(); err != nil {
+ errs = append(errs, err)
+ }
+ if len(errs) > 0 {
+ klog.Errorf("error happened in periodicity controller, err: %v", errors.NewAggregate(errs))
+ }
+}
+
+// syncStatefulSetTimeStamp updates the last-sync timestamp annotation on all StatefulSets managed by the operator and aggregates any errors.
+func (c *Controller) syncStatefulSetTimeStamp() error {
+ selector, err := label.New().Selector()
+ if err != nil {
+ return err
+ }
+ stsList, err := c.stsLister.List(selector)
+ if err != nil {
+ return err
+ }
+ var errs []error
+ for _, sts := range stsList {
+ // If updating a StatefulSet fails, collect the error
+ // and continue with the next StatefulSet.
+ ok, tcRef := util.IsOwnedByTidbCluster(sts)
+ if !ok {
+ continue
+ }
+ tc, err := c.tcLister.TidbClusters(sts.Namespace).Get(tcRef.Name)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ if sts.Annotations == nil {
+ sts.Annotations = map[string]string{}
+ }
+ sts.Annotations[label.AnnStsLastSyncTimestamp] = time.Now().Format(time.RFC3339)
+ newSts, err := c.statefulSetControl.UpdateStatefulSet(tc, sts)
+ if err != nil {
+ klog.Errorf("failed to update statefulset %q, error: %v", sts.Name, err)
+ errs = append(errs, err)
+ continue
+ }
+ klog.Infof("successfully updated statefulset %q", newSts.Name)
+ }
+ return errors.NewAggregate(errs)
+}
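For reviewers who want to see how the new controller is consumed end to end, here is a minimal wiring sketch in the same spirit as the other controllers; the import path of the periodicity package and the client bootstrap are assumptions, while NewController and Run come from the file above.

```go
package main

import (
	"time"

	"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
	informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
	"github.com/pingcap/tidb-operator/pkg/controller/periodicity" // assumed package path
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Any rest.Config works here; in-cluster config keeps the sketch short.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	kubeCli := kubernetes.NewForConfigOrDie(cfg)
	cli := versioned.NewForConfigOrDie(cfg)

	informerFactory := informers.NewSharedInformerFactory(cli, 30*time.Second)
	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeCli, 30*time.Second)

	pc := periodicity.NewController(kubeCli, informerFactory, kubeInformerFactory)

	stopCh := make(chan struct{})
	informerFactory.Start(stopCh)
	kubeInformerFactory.Start(stopCh)

	// Run blocks and touches operator-managed StatefulSets once per minute,
	// which keeps them re-queued in the StatefulSet controller.
	pc.Run(stopCh)
}
```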
diff --git a/pkg/controller/pod_control.go b/pkg/controller/pod_control.go
index 06be0d3efd..02874498aa 100644
--- a/pkg/controller/pod_control.go
+++ b/pkg/controller/pod_control.go
@@ -30,7 +30,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// PodControlInterface manages Pods used in TidbCluster
@@ -77,10 +77,10 @@ func (rpc *realPodControl) UpdatePod(tc *v1alpha1.TidbCluster, pod *corev1.Pod)
var updateErr error
updatePod, updateErr = rpc.kubeCli.CoreV1().Pods(ns).Update(pod)
if updateErr == nil {
- glog.Infof("Pod: [%s/%s] updated successfully, TidbCluster: [%s/%s]", ns, podName, ns, tcName)
+ klog.Infof("Pod: [%s/%s] updated successfully, TidbCluster: [%s/%s]", ns, podName, ns, tcName)
return nil
}
- glog.Errorf("failed to update Pod: [%s/%s], error: %v", ns, podName, updateErr)
+ klog.Errorf("failed to update Pod: [%s/%s], error: %v", ns, podName, updateErr)
if updated, err := rpc.podLister.Pods(ns).Get(podName); err == nil {
// make a copy so we don't mutate the shared cache
@@ -156,7 +156,7 @@ func (rpc *realPodControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pod *corev1.
if labels[label.ClusterIDLabelKey] == clusterID &&
labels[label.MemberIDLabelKey] == memberID &&
labels[label.StoreIDLabelKey] == storeID {
- glog.V(4).Infof("pod %s/%s already has cluster labels set, skipping. TidbCluster: %s", ns, podName, tcName)
+ klog.V(4).Infof("pod %s/%s already has cluster labels set, skipping. TidbCluster: %s", ns, podName, tcName)
return pod, nil
}
// labels is a pointer, modify labels will modify pod.Labels
@@ -169,10 +169,10 @@ func (rpc *realPodControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pod *corev1.
var updateErr error
updatePod, updateErr = rpc.kubeCli.CoreV1().Pods(ns).Update(pod)
if updateErr == nil {
- glog.V(4).Infof("update pod %s/%s with cluster labels %v successfully, TidbCluster: %s", ns, podName, labels, tcName)
+ klog.V(4).Infof("update pod %s/%s with cluster labels %v successfully, TidbCluster: %s", ns, podName, labels, tcName)
return nil
}
- glog.Errorf("failed to update pod %s/%s with cluster labels %v, TidbCluster: %s, err: %v", ns, podName, labels, tcName, updateErr)
+ klog.Errorf("failed to update pod %s/%s with cluster labels %v, TidbCluster: %s, err: %v", ns, podName, labels, tcName, updateErr)
if updated, err := rpc.podLister.Pods(ns).Get(podName); err == nil {
// make a copy so we don't mutate the shared cache
@@ -191,13 +191,13 @@ func (rpc *realPodControl) DeletePod(tc *v1alpha1.TidbCluster, pod *corev1.Pod)
ns := tc.GetNamespace()
tcName := tc.GetName()
podName := pod.GetName()
- preconditions := metav1.Preconditions{UID: &pod.UID}
+ preconditions := metav1.Preconditions{UID: &pod.UID, ResourceVersion: &pod.ResourceVersion}
deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions}
err := rpc.kubeCli.CoreV1().Pods(ns).Delete(podName, &deleteOptions)
if err != nil {
- glog.Errorf("failed to delete Pod: [%s/%s], TidbCluster: %s, %v", ns, podName, tcName, err)
+ klog.Errorf("failed to delete Pod: [%s/%s], TidbCluster: %s, %v", ns, podName, tcName, err)
} else {
- glog.V(4).Infof("delete Pod: [%s/%s] successfully, TidbCluster: %s", ns, podName, tcName)
+ klog.V(4).Infof("delete Pod: [%s/%s] successfully, TidbCluster: %s", ns, podName, tcName)
}
rpc.recordPodEvent("delete", tc, podName, err)
return err
diff --git a/pkg/controller/pv_control.go b/pkg/controller/pv_control.go
index 66d0e2a4e7..c76562560f 100644
--- a/pkg/controller/pv_control.go
+++ b/pkg/controller/pv_control.go
@@ -30,7 +30,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// PVControlInterface manages PVs used in TidbCluster
@@ -98,7 +98,7 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste
pvName := pv.GetName()
pvcRef := pv.Spec.ClaimRef
if pvcRef == nil {
- glog.Warningf("PV: [%s] doesn't have a ClaimRef, skipping, %s: %s/%s", kind, pvName, ns, name)
+ klog.Warningf("PV: [%s] doesn't have a ClaimRef, skipping, %s: %s/%s", kind, pvName, ns, name)
return pv, nil
}
@@ -109,7 +109,7 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste
return pv, err
}
- glog.Warningf("PV: [%s]'s PVC: [%s/%s] doesn't exist, skipping. %s: %s", pvName, ns, pvcName, kind, name)
+ klog.Warningf("PV: [%s]'s PVC: [%s/%s] doesn't exist, skipping. %s: %s", pvName, ns, pvcName, kind, name)
return pv, nil
}
@@ -128,7 +128,7 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste
pv.Labels[label.MemberIDLabelKey] == memberID &&
pv.Labels[label.StoreIDLabelKey] == storeID &&
pv.Annotations[label.AnnPodNameKey] == podName {
- glog.V(4).Infof("pv %s already has labels and annotations synced, skipping. %s: %s/%s", pvName, kind, ns, name)
+ klog.V(4).Infof("pv %s already has labels and annotations synced, skipping. %s: %s/%s", pvName, kind, ns, name)
return pv, nil
}
@@ -150,10 +150,10 @@ func (rpc *realPVControl) UpdateMetaInfo(obj runtime.Object, pv *corev1.Persiste
var updateErr error
updatePV, updateErr = rpc.kubeCli.CoreV1().PersistentVolumes().Update(pv)
if updateErr == nil {
- glog.Infof("PV: [%s] updated successfully, %s: %s/%s", pvName, kind, ns, name)
+ klog.Infof("PV: [%s] updated successfully, %s: %s/%s", pvName, kind, ns, name)
return nil
}
- glog.Errorf("failed to update PV: [%s], %s %s/%s, error: %v", pvName, kind, ns, name, err)
+ klog.Errorf("failed to update PV: [%s], %s %s/%s, error: %v", pvName, kind, ns, name, err)
if updated, err := rpc.pvLister.Get(pvName); err == nil {
// make a copy so we don't mutate the shared cache
diff --git a/pkg/controller/pvc_control.go b/pkg/controller/pvc_control.go
index 2ec2ad51e1..5b16eaadea 100644
--- a/pkg/controller/pvc_control.go
+++ b/pkg/controller/pvc_control.go
@@ -27,7 +27,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// TODO add unit tests
@@ -68,9 +68,9 @@ func (rpc *realPVCControl) DeletePVC(tc *v1alpha1.TidbCluster, pvc *corev1.Persi
pvcName := pvc.GetName()
err := rpc.kubeCli.CoreV1().PersistentVolumeClaims(tc.GetNamespace()).Delete(pvcName, nil)
if err != nil {
- glog.Errorf("failed to delete PVC: [%s/%s], TidbCluster: %s, %v", ns, pvcName, tcName, err)
+ klog.Errorf("failed to delete PVC: [%s/%s], TidbCluster: %s, %v", ns, pvcName, tcName, err)
}
- glog.V(4).Infof("delete PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
+ klog.V(4).Infof("delete PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
rpc.recordPVCEvent("delete", tc, pvcName, err)
return err
}
@@ -87,10 +87,10 @@ func (rpc *realPVCControl) UpdatePVC(tc *v1alpha1.TidbCluster, pvc *corev1.Persi
var updateErr error
updatePVC, updateErr = rpc.kubeCli.CoreV1().PersistentVolumeClaims(ns).Update(pvc)
if updateErr == nil {
- glog.Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
+ klog.Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
return nil
}
- glog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr)
+ klog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr)
if updated, err := rpc.pvcLister.PersistentVolumeClaims(ns).Get(pvcName); err == nil {
// make a copy so we don't mutate the shared cache
@@ -128,7 +128,7 @@ func (rpc *realPVCControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pvc *corev1.
pvc.Labels[label.MemberIDLabelKey] == memberID &&
pvc.Labels[label.StoreIDLabelKey] == storeID &&
pvc.Annotations[label.AnnPodNameKey] == podName {
- glog.V(4).Infof("pvc %s/%s already has labels and annotations synced, skipping, TidbCluster: %s", ns, pvcName, tcName)
+ klog.V(4).Infof("pvc %s/%s already has labels and annotations synced, skipping, TidbCluster: %s", ns, pvcName, tcName)
return pvc, nil
}
@@ -144,10 +144,10 @@ func (rpc *realPVCControl) UpdateMetaInfo(tc *v1alpha1.TidbCluster, pvc *corev1.
var updateErr error
updatePVC, updateErr = rpc.kubeCli.CoreV1().PersistentVolumeClaims(ns).Update(pvc)
if updateErr == nil {
- glog.V(4).Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
+ klog.V(4).Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
return nil
}
- glog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr)
+ klog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr)
if updated, err := rpc.pvcLister.PersistentVolumeClaims(ns).Get(pvcName); err == nil {
// make a copy so we don't mutate the shared cache
diff --git a/pkg/controller/restore/restore_control.go b/pkg/controller/restore/restore_control.go
index bedbf64a09..55f8834d12 100644
--- a/pkg/controller/restore/restore_control.go
+++ b/pkg/controller/restore/restore_control.go
@@ -16,7 +16,9 @@ package restore
import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/backup"
+ informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
+ "k8s.io/client-go/tools/cache"
)
// ControlInterface implements the control logic for updating Restore
@@ -39,8 +41,42 @@ type defaultRestoreControl struct {
restoreManager backup.RestoreManager
}
+var _ ControlInterface = &defaultRestoreControl{}
+
// UpdateRestore executes the core logic loop for a Restore.
func (rc *defaultRestoreControl) UpdateRestore(restore *v1alpha1.Restore) error {
restore.SetGroupVersionKind(controller.RestoreControllerKind)
return rc.restoreManager.Sync(restore)
}
+
+// FakeRestoreControl is a fake RestoreControlInterface
+type FakeRestoreControl struct {
+ backupIndexer cache.Indexer
+ updateRestoreTracker controller.RequestTracker
+}
+
+// NewFakeRestoreControl returns a FakeRestoreControl
+func NewFakeRestoreControl(restoreInformer informers.RestoreInformer) *FakeRestoreControl {
+ return &FakeRestoreControl{
+ restoreInformer.Informer().GetIndexer(),
+ controller.RequestTracker{},
+ }
+}
+
+// SetUpdateRestoreError sets the error attributes of updateRestoreTracker
+func (fbc *FakeRestoreControl) SetUpdateRestoreError(err error, after int) {
+ fbc.updateRestoreTracker.SetError(err).SetAfter(after)
+}
+
+// UpdateRestore adds the restore to the restore indexer
+func (fbc *FakeRestoreControl) UpdateRestore(backup *v1alpha1.Restore) error {
+ defer fbc.updateRestoreTracker.Inc()
+ if fbc.updateRestoreTracker.ErrorReady() {
+ defer fbc.updateRestoreTracker.Reset()
+ return fbc.updateRestoreTracker.GetError()
+ }
+
+ return fbc.backupIndexer.Add(backup)
+}
+
+var _ ControlInterface = &FakeRestoreControl{}
diff --git a/pkg/controller/restore/restore_control_test.go b/pkg/controller/restore/restore_control_test.go
new file mode 100644
index 0000000000..0eedc92b57
--- /dev/null
+++ b/pkg/controller/restore/restore_control_test.go
@@ -0,0 +1,79 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package restore
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ . "github.com/onsi/gomega"
+ "github.com/pingcap/tidb-operator/pkg/backup/restore"
+ "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake"
+ informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
+ "k8s.io/client-go/tools/cache"
+)
+
+func TestRestoreControlUpdateRestore(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ tests := []struct {
+ name string
+ syncRestoreManagerErr bool
+ errExpectFn func(*GomegaWithT, error)
+ }{
+ {
+ name: "restore manager sync failed",
+ syncRestoreManagerErr: true,
+ errExpectFn: func(g *GomegaWithT, err error) {
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(strings.Contains(err.Error(), "restore manager sync error")).To(Equal(true))
+ },
+ },
+ {
+ name: "update newly create restore normally",
+ syncRestoreManagerErr: false,
+ errExpectFn: func(g *GomegaWithT, err error) {
+ g.Expect(err).NotTo(HaveOccurred())
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ restore := newRestore()
+ control, restoreIndexer, restoreManager := newFakeRestoreControl()
+
+ restoreIndexer.Add(restore)
+
+ if tt.syncRestoreManagerErr {
+ restoreManager.SetSyncError(fmt.Errorf("restore manager sync error"))
+ }
+
+ err := control.UpdateRestore(restore)
+ if tt.errExpectFn != nil {
+ tt.errExpectFn(g, err)
+ }
+ })
+ }
+}
+
+func newFakeRestoreControl() (ControlInterface, cache.Indexer, *restore.FakeRestoreManager) {
+ cli := &fake.Clientset{}
+
+ restoreInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().Restores()
+ restoreManager := restore.NewFakeRestoreManager()
+ control := NewDefaultRestoreControl(restoreManager)
+
+ return control, restoreInformer.Informer().GetIndexer(), restoreManager
+}
diff --git a/pkg/controller/restore/restore_controller.go b/pkg/controller/restore/restore_controller.go
index 3581fbdf37..aaeeb3dce1 100644
--- a/pkg/controller/restore/restore_controller.go
+++ b/pkg/controller/restore/restore_controller.go
@@ -34,7 +34,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// Controller controls restore.
@@ -62,16 +62,16 @@ func NewController(
kubeInformerFactory kubeinformers.SharedInformerFactory,
) *Controller {
eventBroadcaster := record.NewBroadcaster()
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "restore"})
restoreInformer := informerFactory.Pingcap().V1alpha1().Restores()
+ tcInformer := informerFactory.Pingcap().V1alpha1().TidbClusters()
backupInformer := informerFactory.Pingcap().V1alpha1().Backups()
jobInformer := kubeInformerFactory.Batch().V1().Jobs()
pvcInformer := kubeInformerFactory.Core().V1().PersistentVolumeClaims()
- secretInformer := kubeInformerFactory.Core().V1().Secrets()
statusUpdater := controller.NewRealRestoreConditionUpdater(cli, restoreInformer.Lister(), recorder)
jobControl := controller.NewRealJobControl(kubeCli, recorder)
pvcControl := controller.NewRealGeneralPVCControl(kubeCli, recorder)
@@ -83,10 +83,11 @@ func NewController(
restore.NewRestoreManager(
backupInformer.Lister(),
statusUpdater,
- secretInformer.Lister(),
+ kubeCli,
jobInformer.Lister(),
jobControl,
pvcInformer.Lister(),
+ tcInformer.Lister(),
pvcControl,
),
),
@@ -114,8 +115,8 @@ func (rsc *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer rsc.queue.ShutDown()
- glog.Info("Starting restore controller")
- defer glog.Info("Shutting down restore controller")
+ klog.Info("Starting restore controller")
+ defer klog.Info("Shutting down restore controller")
for i := 0; i < workers; i++ {
go wait.Until(rsc.worker, time.Second, stopCh)
@@ -141,10 +142,10 @@ func (rsc *Controller) processNextWorkItem() bool {
defer rsc.queue.Done(key)
if err := rsc.sync(key.(string)); err != nil {
if perrors.Find(err, controller.IsRequeueError) != nil {
- glog.Infof("Restore: %v, still need sync: %v, requeuing", key.(string), err)
+ klog.Infof("Restore: %v, still need sync: %v, requeuing", key.(string), err)
rsc.queue.AddRateLimited(key)
} else if perrors.Find(err, controller.IsIgnoreError) != nil {
- glog.V(4).Infof("Restore: %v, ignore err: %v", key.(string), err)
+ klog.V(4).Infof("Restore: %v, ignore err: %v", key.(string), err)
} else {
utilruntime.HandleError(fmt.Errorf("Restore: %v, sync failed, err: %v, requeuing", key.(string), err))
rsc.queue.AddRateLimited(key)
@@ -159,7 +160,7 @@ func (rsc *Controller) processNextWorkItem() bool {
func (rsc *Controller) sync(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing Restore %q (%v)", key, time.Since(startTime))
+ klog.V(4).Infof("Finished syncing Restore %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
@@ -168,7 +169,7 @@ func (rsc *Controller) sync(key string) error {
}
restore, err := rsc.restoreLister.Restores(ns).Get(name)
if errors.IsNotFound(err) {
- glog.Infof("Restore has been deleted %v", key)
+ klog.Infof("Restore has been deleted %v", key)
return nil
}
if err != nil {
@@ -188,21 +189,21 @@ func (rsc *Controller) updateRestore(cur interface{}) {
name := newRestore.GetName()
if v1alpha1.IsRestoreInvalid(newRestore) {
- glog.V(4).Infof("restore %s/%s is Invalid, skipping.", ns, name)
+ klog.V(4).Infof("restore %s/%s is Invalid, skipping.", ns, name)
return
}
if v1alpha1.IsRestoreComplete(newRestore) {
- glog.V(4).Infof("restore %s/%s is Complete, skipping.", ns, name)
+ klog.V(4).Infof("restore %s/%s is Complete, skipping.", ns, name)
return
}
if v1alpha1.IsRestoreScheduled(newRestore) {
- glog.V(4).Infof("restore %s/%s is already scheduled, skipping", ns, name)
+ klog.V(4).Infof("restore %s/%s is already scheduled, skipping", ns, name)
return
}
- glog.V(4).Infof("restore object %s/%s enqueue", ns, name)
+ klog.V(4).Infof("restore object %s/%s enqueue", ns, name)
rsc.enqueueRestore(newRestore)
}
diff --git a/pkg/controller/restore/restore_controller_test.go b/pkg/controller/restore/restore_controller_test.go
new file mode 100644
index 0000000000..f32b534f0c
--- /dev/null
+++ b/pkg/controller/restore/restore_controller_test.go
@@ -0,0 +1,261 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package restore
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ . "github.com/onsi/gomega"
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/backup/constants"
+ "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake"
+ informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kubeinformers "k8s.io/client-go/informers"
+ kubefake "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/utils/pointer"
+)
+
+func TestRestoreControllerEnqueueRestore(t *testing.T) {
+ g := NewGomegaWithT(t)
+ restore := newRestore()
+ rtc, _, _ := newFakeRestoreController()
+ rtc.enqueueRestore(restore)
+ g.Expect(rtc.queue.Len()).To(Equal(1))
+}
+
+func TestRestoreControllerEnqueueRestoreFailed(t *testing.T) {
+ g := NewGomegaWithT(t)
+ rtc, _, _ := newFakeRestoreController()
+ rtc.enqueueRestore(struct{}{})
+ g.Expect(rtc.queue.Len()).To(Equal(0))
+}
+
+func TestRestoreControllerUpdateRestore(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ tests := []struct {
+ name string
+ restoreIsInvalid bool
+ restoreHasBeenCompleted bool
+ restoreHasBeenScheduled bool
+ expectFn func(*GomegaWithT, *Controller)
+ }{
+ {
+ name: "restore is invalid",
+ restoreIsInvalid: true,
+ restoreHasBeenCompleted: false,
+ restoreHasBeenScheduled: false,
+ expectFn: func(g *GomegaWithT, rtc *Controller) {
+ g.Expect(rtc.queue.Len()).To(Equal(0))
+ },
+ },
+ {
+ name: "restore has been completed",
+ restoreIsInvalid: false,
+ restoreHasBeenCompleted: true,
+ restoreHasBeenScheduled: false,
+ expectFn: func(g *GomegaWithT, rtc *Controller) {
+ g.Expect(rtc.queue.Len()).To(Equal(0))
+ },
+ },
+ {
+ name: "restore has been scheduled",
+ restoreIsInvalid: false,
+ restoreHasBeenCompleted: false,
+ restoreHasBeenScheduled: true,
+ expectFn: func(g *GomegaWithT, rtc *Controller) {
+ g.Expect(rtc.queue.Len()).To(Equal(0))
+ },
+ },
+ {
+ name: "restore is newly created",
+ restoreIsInvalid: false,
+ restoreHasBeenCompleted: false,
+ restoreHasBeenScheduled: false,
+ expectFn: func(g *GomegaWithT, rtc *Controller) {
+ g.Expect(rtc.queue.Len()).To(Equal(1))
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+
+ restore := newRestore()
+ rtc, _, _ := newFakeRestoreController()
+
+ if tt.restoreIsInvalid {
+ restore.Status.Conditions = []v1alpha1.RestoreCondition{
+ {
+ Type: v1alpha1.RestoreInvalid,
+ Status: corev1.ConditionTrue,
+ },
+ }
+ }
+
+ if tt.restoreHasBeenCompleted {
+ restore.Status.Conditions = []v1alpha1.RestoreCondition{
+ {
+ Type: v1alpha1.RestoreComplete,
+ Status: corev1.ConditionTrue,
+ },
+ }
+ }
+
+ if tt.restoreHasBeenScheduled {
+ restore.Status.Conditions = []v1alpha1.RestoreCondition{
+ {
+ Type: v1alpha1.RestoreScheduled,
+ Status: corev1.ConditionTrue,
+ },
+ }
+ }
+
+ rtc.updateRestore(restore)
+ if tt.expectFn != nil {
+ tt.expectFn(g, rtc)
+ }
+ })
+ }
+}
+
+func TestRestoreControllerSync(t *testing.T) {
+ g := NewGomegaWithT(t)
+ tests := []struct {
+ name string
+ addRestoreToIndexer bool
+ errWhenUpdateRestore bool
+ invalidKeyFn func(restore *v1alpha1.Restore) string
+ errExpectFn func(*GomegaWithT, error)
+ }{
+ {
+ name: "normal",
+ addRestoreToIndexer: true,
+ errWhenUpdateRestore: false,
+ invalidKeyFn: nil,
+ errExpectFn: func(g *GomegaWithT, err error) {
+ g.Expect(err).NotTo(HaveOccurred())
+ },
+ },
+ {
+ name: "invalid restore key",
+ addRestoreToIndexer: true,
+ errWhenUpdateRestore: false,
+ invalidKeyFn: func(restore *v1alpha1.Restore) string {
+ return fmt.Sprintf("test/demo/%s", restore.GetName())
+ },
+ errExpectFn: func(g *GomegaWithT, err error) {
+ g.Expect(err).To(HaveOccurred())
+ },
+ },
+ {
+ name: "can't found restore",
+ addRestoreToIndexer: false,
+ errWhenUpdateRestore: false,
+ invalidKeyFn: nil,
+ errExpectFn: func(g *GomegaWithT, err error) {
+ g.Expect(err).NotTo(HaveOccurred())
+ },
+ },
+ {
+ name: "update restore failed",
+ addRestoreToIndexer: true,
+ errWhenUpdateRestore: true,
+ invalidKeyFn: nil,
+ errExpectFn: func(g *GomegaWithT, err error) {
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(strings.Contains(err.Error(), "update restore failed")).To(Equal(true))
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ restore := newRestore()
+ rtc, restoreIndexer, restoreControl := newFakeRestoreController()
+
+ if tt.addRestoreToIndexer {
+ err := restoreIndexer.Add(restore)
+ g.Expect(err).NotTo(HaveOccurred())
+ }
+
+ key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(restore)
+ if tt.invalidKeyFn != nil {
+ key = tt.invalidKeyFn(restore)
+ }
+
+ if tt.errWhenUpdateRestore {
+ restoreControl.SetUpdateRestoreError(fmt.Errorf("update restore failed"), 0)
+ }
+
+ err := rtc.sync(key)
+
+ if tt.errExpectFn != nil {
+ tt.errExpectFn(g, err)
+ }
+ })
+ }
+}
+
+func newFakeRestoreController() (*Controller, cache.Indexer, *FakeRestoreControl) {
+ cli := fake.NewSimpleClientset()
+ kubeCli := kubefake.NewSimpleClientset()
+ informerFactory := informers.NewSharedInformerFactory(cli, 0)
+ kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeCli, 0)
+
+ restoreInformer := informerFactory.Pingcap().V1alpha1().Restores()
+ restoreControl := NewFakeRestoreControl(restoreInformer)
+
+ rtc := NewController(kubeCli, cli, informerFactory, kubeInformerFactory)
+ rtc.control = restoreControl
+
+ return rtc, restoreInformer.Informer().GetIndexer(), restoreControl
+}
+
+func newRestore() *v1alpha1.Restore {
+ return &v1alpha1.Restore{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Restore",
+ APIVersion: "pingcap.com/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-restore",
+ Namespace: corev1.NamespaceDefault,
+ UID: "test-rt",
+ },
+ Spec: v1alpha1.RestoreSpec{
+ To: v1alpha1.TiDBAccessConfig{
+ Host: "10.1.1.2",
+ Port: constants.DefaultTidbPort,
+ User: constants.DefaultTidbUser,
+ SecretName: "demo1-tidb-secret",
+ },
+ StorageProvider: v1alpha1.StorageProvider{
+ S3: &v1alpha1.S3StorageProvider{
+ Provider: v1alpha1.S3StorageProviderTypeCeph,
+ Endpoint: "http://10.0.0.1",
+ Bucket: "test1-demo1",
+ SecretName: "demo",
+ },
+ },
+ StorageClassName: pointer.StringPtr("local-storage"),
+ StorageSize: "1Gi",
+ },
+ }
+}
diff --git a/pkg/controller/restore_status_updater.go b/pkg/controller/restore_status_updater.go
index 486f146c3c..591cc3b075 100644
--- a/pkg/controller/restore_status_updater.go
+++ b/pkg/controller/restore_status_updater.go
@@ -17,7 +17,7 @@ import (
"fmt"
"strings"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
@@ -63,7 +63,7 @@ func (rcu *realRestoreConditionUpdater) Update(restore *v1alpha1.Restore, condit
if isUpdate {
_, updateErr := rcu.cli.PingcapV1alpha1().Restores(ns).Update(restore)
if updateErr == nil {
- glog.Infof("Restore: [%s/%s] updated successfully", ns, restoreName)
+ klog.Infof("Restore: [%s/%s] updated successfully", ns, restoreName)
return nil
}
if updated, err := rcu.restoreLister.Restores(ns).Get(restoreName); err == nil {
diff --git a/pkg/controller/secret_control.go b/pkg/controller/secret_control.go
index 7392c4f9ea..ebe4db6155 100644
--- a/pkg/controller/secret_control.go
+++ b/pkg/controller/secret_control.go
@@ -17,98 +17,65 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
- "fmt"
- "github.com/pingcap/tidb-operator/pkg/label"
certutil "github.com/pingcap/tidb-operator/pkg/util/crypto"
- corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
- corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// SecretControlInterface manages certificates used by TiDB clusters
type SecretControlInterface interface {
- Create(or metav1.OwnerReference, certOpts *TiDBClusterCertOptions, cert []byte, key []byte) error
Load(ns string, secretName string) ([]byte, []byte, error)
Check(ns string, secretName string) bool
}
type realSecretControl struct {
- kubeCli kubernetes.Interface
- secretLister corelisters.SecretLister
+ kubeCli kubernetes.Interface
}
// NewRealSecretControl creates a new SecretControlInterface
func NewRealSecretControl(
kubeCli kubernetes.Interface,
- secretLister corelisters.SecretLister,
) SecretControlInterface {
return &realSecretControl{
- kubeCli: kubeCli,
- secretLister: secretLister,
+ kubeCli: kubeCli,
}
}
-func (rsc *realSecretControl) Create(or metav1.OwnerReference, certOpts *TiDBClusterCertOptions, cert []byte, key []byte) error {
- secretName := fmt.Sprintf("%s-%s", certOpts.Instance, certOpts.Suffix)
-
- secretLabel := label.New().Instance(certOpts.Instance).
- Component(certOpts.Component).Labels()
-
- secret := &corev1.Secret{
- ObjectMeta: types.ObjectMeta{
- Name: secretName,
- Labels: secretLabel,
- OwnerReferences: []metav1.OwnerReference{or},
- },
- Data: map[string][]byte{
- "cert": cert,
- "key": key,
- },
- }
-
- _, err := rsc.kubeCli.CoreV1().Secrets(certOpts.Namespace).Create(secret)
- if err == nil {
- glog.Infof("save cert to secret %s/%s", certOpts.Namespace, secretName)
- }
- return err
-}
-
// Load loads cert and key from Secret matching the name
func (rsc *realSecretControl) Load(ns string, secretName string) ([]byte, []byte, error) {
- secret, err := rsc.secretLister.Secrets(ns).Get(secretName)
+ secret, err := rsc.kubeCli.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
if err != nil {
return nil, nil, err
}
- return secret.Data["cert"], secret.Data["key"], nil
+ return secret.Data[v1.TLSCertKey], secret.Data[v1.TLSPrivateKeyKey], nil
}
// Check returns true if the secret already exist
func (rsc *realSecretControl) Check(ns string, secretName string) bool {
certBytes, keyBytes, err := rsc.Load(ns, secretName)
if err != nil {
- glog.Errorf("certificate validation failed for [%s/%s], error loading cert from secret, %v", ns, secretName, err)
+ klog.Errorf("certificate validation failed for [%s/%s], error loading cert from secret, %v", ns, secretName, err)
return false
}
// validate if the certificate is valid
block, _ := pem.Decode(certBytes)
if block == nil {
- glog.Errorf("certificate validation failed for [%s/%s], can not decode cert to PEM", ns, secretName)
+ klog.Errorf("certificate validation failed for [%s/%s], can not decode cert to PEM", ns, secretName)
return false
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
- glog.Errorf("certificate validation failed for [%s/%s], can not parse cert, %v", ns, secretName, err)
+ klog.Errorf("certificate validation failed for [%s/%s], can not parse cert, %v", ns, secretName, err)
return false
}
rootCAs, err := certutil.ReadCACerts()
if err != nil {
- glog.Errorf("certificate validation failed for [%s/%s], error loading CAs, %v", ns, secretName, err)
+ klog.Errorf("certificate validation failed for [%s/%s], error loading CAs, %v", ns, secretName, err)
return false
}
@@ -121,14 +88,14 @@ func (rsc *realSecretControl) Check(ns string, secretName string) bool {
}
_, err = cert.Verify(verifyOpts)
if err != nil {
- glog.Errorf("certificate validation failed for [%s/%s], %v", ns, secretName, err)
+ klog.Errorf("certificate validation failed for [%s/%s], %v", ns, secretName, err)
return false
}
// validate if the certificate and private key matches
_, err = tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
- glog.Errorf("certificate validation failed for [%s/%s], error loading key pair, %v", ns, secretName, err)
+ klog.Errorf("certificate validation failed for [%s/%s], error loading key pair, %v", ns, secretName, err)
return false
}
@@ -143,10 +110,8 @@ type FakeSecretControl struct {
func NewFakeSecretControl(
kubeCli kubernetes.Interface,
- secretLister corelisters.SecretLister,
) SecretControlInterface {
return &realSecretControl{
- kubeCli: kubeCli,
- secretLister: secretLister,
+ kubeCli: kubeCli,
}
}
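The Load/Check changes above assume the certificates now live in a standard kubernetes.io/tls Secret keyed by tls.crt and tls.key, fetched directly from the API server rather than via a lister and the old custom cert/key keys. A minimal sketch of creating such a Secret with client-go; the Secret name and namespace are illustrative only:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createTLSSecret writes a kubernetes.io/tls Secret in the layout the new
// realSecretControl.Load expects (corev1.TLSCertKey / corev1.TLSPrivateKeyKey).
func createTLSSecret(kubeCli kubernetes.Interface, ns string, certPEM, keyPEM []byte) error {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo-cluster-client-secret", // illustrative name
			Namespace: ns,
		},
		Type: corev1.SecretTypeTLS,
		Data: map[string][]byte{
			corev1.TLSCertKey:       certPEM, // "tls.crt"
			corev1.TLSPrivateKeyKey: keyPEM,  // "tls.key"
		},
	}
	_, err := kubeCli.CoreV1().Secrets(ns).Create(secret)
	return err
}
```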
diff --git a/pkg/controller/service_control.go b/pkg/controller/service_control.go
index 1d858c1a0f..5a0dd1cf56 100644
--- a/pkg/controller/service_control.go
+++ b/pkg/controller/service_control.go
@@ -29,7 +29,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// ExternalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints.
@@ -74,7 +74,7 @@ func (sc *realServiceControl) UpdateService(tc *v1alpha1.TidbCluster, svc *corev
var updateErr error
updateSvc, updateErr = sc.kubeCli.CoreV1().Services(ns).Update(svc)
if updateErr == nil {
- glog.Infof("update Service: [%s/%s] successfully, TidbCluster: %s", ns, svcName, tcName)
+ klog.Infof("update Service: [%s/%s] successfully, TidbCluster: %s", ns, svcName, tcName)
return nil
}
diff --git a/pkg/controller/stateful_set_control.go b/pkg/controller/stateful_set_control.go
index 7d4a427487..cbe83a02e2 100644
--- a/pkg/controller/stateful_set_control.go
+++ b/pkg/controller/stateful_set_control.go
@@ -30,7 +30,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// StatefulSetControlInterface defines the interface that uses to create, update, and delete StatefulSets,
@@ -78,10 +78,10 @@ func (sc *realStatefulSetControl) UpdateStatefulSet(tc *v1alpha1.TidbCluster, se
var updateErr error
updatedSS, updateErr = sc.kubeCli.AppsV1().StatefulSets(ns).Update(set)
if updateErr == nil {
- glog.Infof("TidbCluster: [%s/%s]'s StatefulSet: [%s/%s] updated successfully", ns, tcName, ns, setName)
+ klog.Infof("TidbCluster: [%s/%s]'s StatefulSet: [%s/%s] updated successfully", ns, tcName, ns, setName)
return nil
}
- glog.Errorf("failed to update TidbCluster: [%s/%s]'s StatefulSet: [%s/%s], error: %v", ns, tcName, ns, setName, updateErr)
+ klog.Errorf("failed to update TidbCluster: [%s/%s]'s StatefulSet: [%s/%s], error: %v", ns, tcName, ns, setName, updateErr)
if updated, err := sc.setLister.StatefulSets(ns).Get(setName); err == nil {
// make a copy so we don't mutate the shared cache
diff --git a/pkg/controller/tidb_control.go b/pkg/controller/tidb_control.go
index 75c6ac975c..ebe03fb1e7 100644
--- a/pkg/controller/tidb_control.go
+++ b/pkg/controller/tidb_control.go
@@ -15,16 +15,19 @@ package controller
import (
"crypto/tls"
+ "crypto/x509"
"encoding/json"
"fmt"
- "io/ioutil"
- "net/http"
- "time"
-
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/httputil"
- certutil "github.com/pingcap/tidb-operator/pkg/util/crypto"
+ "github.com/pingcap/tidb-operator/pkg/util"
"github.com/pingcap/tidb/config"
+ "io/ioutil"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "net/http"
+ "time"
)
const (
@@ -51,24 +54,45 @@ type TiDBControlInterface interface {
// defaultTiDBControl is default implementation of TiDBControlInterface.
type defaultTiDBControl struct {
httpClient *http.Client
+ kubeCli kubernetes.Interface
}
// NewDefaultTiDBControl returns a defaultTiDBControl instance
-func NewDefaultTiDBControl() TiDBControlInterface {
- return &defaultTiDBControl{httpClient: &http.Client{Timeout: timeout}}
+func NewDefaultTiDBControl(kubeCli kubernetes.Interface) TiDBControlInterface {
+ return &defaultTiDBControl{httpClient: &http.Client{Timeout: timeout}, kubeCli: kubeCli}
}
-func (tdc *defaultTiDBControl) useTLSHTTPClient(enableTLS bool) error {
- if enableTLS {
- rootCAs, err := certutil.ReadCACerts()
- if err != nil {
- return err
- }
- config := &tls.Config{
- RootCAs: rootCAs,
- }
- tdc.httpClient.Transport = &http.Transport{TLSClientConfig: config}
+func (tdc *defaultTiDBControl) useTLSHTTPClient(tc *v1alpha1.TidbCluster) error {
+ if !tc.IsTLSClusterEnabled() {
+ return nil
+ }
+
+ tcName := tc.Name
+ ns := tc.Namespace
+ secretName := util.ClusterClientTLSSecretName(tcName)
+ secret, err := tdc.kubeCli.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
+ if err != nil {
+ return err
+ }
+
+ clientCert, certExists := secret.Data[v1.TLSCertKey]
+ clientKey, keyExists := secret.Data[v1.TLSPrivateKeyKey]
+ if !certExists || !keyExists {
+ return fmt.Errorf("cert or key does not exist in secret %s/%s", ns, secretName)
+ }
+
+ tlsCert, err := tls.X509KeyPair(clientCert, clientKey)
+ if err != nil {
+ return fmt.Errorf("unable to load certificates from secret %s/%s: %v", ns, secretName, err)
+ }
+
+ rootCAs := x509.NewCertPool()
+ rootCAs.AppendCertsFromPEM(secret.Data[v1.ServiceAccountRootCAKey])
+ config := &tls.Config{
+ RootCAs: rootCAs,
+ Certificates: []tls.Certificate{tlsCert},
}
+ tdc.httpClient.Transport = &http.Transport{TLSClientConfig: config}
return nil
}
@@ -77,7 +101,7 @@ func (tdc *defaultTiDBControl) GetHealth(tc *v1alpha1.TidbCluster, ordinal int32
ns := tc.GetNamespace()
scheme := tc.Scheme()
- if err := tdc.useTLSHTTPClient(tc.IsTLSClusterEnabled()); err != nil {
+ if err := tdc.useTLSHTTPClient(tc); err != nil {
return false, err
}
@@ -91,7 +115,7 @@ func (tdc *defaultTiDBControl) GetInfo(tc *v1alpha1.TidbCluster, ordinal int32)
tcName := tc.GetName()
ns := tc.GetNamespace()
scheme := tc.Scheme()
- if err := tdc.useTLSHTTPClient(tc.IsTLSClusterEnabled()); err != nil {
+ if err := tdc.useTLSHTTPClient(tc); err != nil {
return nil, err
}
@@ -126,7 +150,7 @@ func (tdc *defaultTiDBControl) GetSettings(tc *v1alpha1.TidbCluster, ordinal int
tcName := tc.GetName()
ns := tc.GetNamespace()
scheme := tc.Scheme()
- if err := tdc.useTLSHTTPClient(tc.IsTLSClusterEnabled()); err != nil {
+ if err := tdc.useTLSHTTPClient(tc); err != nil {
return nil, err
}
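useTLSHTTPClient now builds a mutual-TLS client from the cluster-client Secret: the key pair comes from tls.crt/tls.key and the root CA from ca.crt (v1.ServiceAccountRootCAKey). Below is a standalone sketch of the same construction, handy when poking a TLS-enabled TiDB status port by hand; the inputs are raw PEM bytes and the timeout value is illustrative:

```go
package example

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"time"
)

// newMTLSClient assembles an HTTP client equivalent to what
// defaultTiDBControl.useTLSHTTPClient configures from the Secret data.
func newMTLSClient(caPEM, certPEM, keyPEM []byte) (*http.Client, error) {
	pair, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, fmt.Errorf("unable to load client key pair: %v", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no CA certificate found in ca.crt data")
	}
	return &http.Client{
		Timeout: 5 * time.Second, // illustrative; the controller keeps its own timeout
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				RootCAs:      pool,
				Certificates: []tls.Certificate{pair},
			},
		},
	}, nil
}
```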
diff --git a/pkg/controller/tidbcluster/tidb_cluster_control.go b/pkg/controller/tidbcluster/tidb_cluster_control.go
index fa217e7d81..8671f7b5a9 100644
--- a/pkg/controller/tidbcluster/tidb_cluster_control.go
+++ b/pkg/controller/tidbcluster/tidb_cluster_control.go
@@ -15,6 +15,7 @@ package tidbcluster
import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1/defaulting"
v1alpha1validation "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1/validation"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/manager"
@@ -46,6 +47,7 @@ func NewDefaultTidbClusterControl(
orphanPodsCleaner member.OrphanPodsCleaner,
pvcCleaner member.PVCCleanerInterface,
pumpMemberManager manager.Manager,
+ tiflashMemberManager manager.Manager,
discoveryManager member.TidbDiscoveryManager,
podRestarter member.PodRestarter,
recorder record.EventRecorder) ControlInterface {
@@ -59,6 +61,7 @@ func NewDefaultTidbClusterControl(
orphanPodsCleaner,
pvcCleaner,
pumpMemberManager,
+ tiflashMemberManager,
discoveryManager,
podRestarter,
recorder,
@@ -75,6 +78,7 @@ type defaultTidbClusterControl struct {
orphanPodsCleaner member.OrphanPodsCleaner
pvcCleaner member.PVCCleanerInterface
pumpMemberManager manager.Manager
+ tiflashMemberManager manager.Manager
discoveryManager member.TidbDiscoveryManager
podRestarter member.PodRestarter
recorder record.EventRecorder
@@ -82,6 +86,7 @@ type defaultTidbClusterControl struct {
// UpdateStatefulSet executes the core logic loop for a tidbcluster.
func (tcc *defaultTidbClusterControl) UpdateTidbCluster(tc *v1alpha1.TidbCluster) error {
+ tcc.defaulting(tc)
if !tcc.validate(tc) {
return nil // fatal error, no need to retry on invalid object
}
@@ -113,6 +118,10 @@ func (tcc *defaultTidbClusterControl) validate(tc *v1alpha1.TidbCluster) bool {
return true
}
+func (tcc *defaultTidbClusterControl) defaulting(tc *v1alpha1.TidbCluster) {
+ defaulting.SetTidbClusterDefault(tc)
+}
+
func (tcc *defaultTidbClusterControl) updateTidbCluster(tc *v1alpha1.TidbCluster) error {
// syncing all PVs managed by operator's reclaim policy to Retain
if err := tcc.reclaimPolicyManager.Sync(tc); err != nil {
@@ -174,6 +183,19 @@ func (tcc *defaultTidbClusterControl) updateTidbCluster(tc *v1alpha1.TidbCluster
return err
}
+ // work that needs to be done to make the tiflash cluster's current state match the desired state:
+ // - waiting for the tidb cluster to be available
+ // - create or update tiflash headless service
+ // - create the tiflash statefulset
+ // - sync tiflash cluster status from pd to TidbCluster object
+ // - set scheduler labels to tiflash stores
+ // - upgrade the tiflash cluster
+ // - scale out/in the tiflash cluster
+ // - failover the tiflash cluster
+ if err := tcc.tiflashMemberManager.Sync(tc); err != nil {
+ return err
+ }
+
// syncing the labels from Pod to PVC and PV, these labels include:
// - label.StoreIDLabelKey
// - label.MemberIDLabelKey
diff --git a/pkg/controller/tidbcluster/tidb_cluster_control_test.go b/pkg/controller/tidbcluster/tidb_cluster_control_test.go
index f12593cb92..41a73f3a51 100644
--- a/pkg/controller/tidbcluster/tidb_cluster_control_test.go
+++ b/pkg/controller/tidbcluster/tidb_cluster_control_test.go
@@ -316,6 +316,7 @@ func newFakeTidbClusterControl() (
orphanPodCleaner := mm.NewFakeOrphanPodsCleaner()
pvcCleaner := mm.NewFakePVCCleaner()
pumpMemberManager := mm.NewFakePumpMemberManager()
+ tiflashMemberManager := mm.NewFakeTiFlashMemberManager()
discoveryManager := mm.NewFakeDiscoveryManger()
podRestarter := mm.NewFakePodRestarter()
control := NewDefaultTidbClusterControl(
@@ -328,6 +329,7 @@ func newFakeTidbClusterControl() (
orphanPodCleaner,
pvcCleaner,
pumpMemberManager,
+ tiflashMemberManager,
discoveryManager,
podRestarter,
recorder,
diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller.go b/pkg/controller/tidbcluster/tidb_cluster_controller.go
index 60f7a78d89..a9f3815498 100644
--- a/pkg/controller/tidbcluster/tidb_cluster_controller.go
+++ b/pkg/controller/tidbcluster/tidb_cluster_controller.go
@@ -39,7 +39,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -75,9 +75,10 @@ func NewController(
pdFailoverPeriod time.Duration,
tikvFailoverPeriod time.Duration,
tidbFailoverPeriod time.Duration,
+ tiflashFailoverPeriod time.Duration,
) *Controller {
eventBroadcaster := record.NewBroadcasterWithCorrelatorOptions(record.CorrelatorOptions{QPS: 1})
- eventBroadcaster.StartLogging(glog.V(2).Infof)
+ eventBroadcaster.StartLogging(klog.V(2).Infof)
eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "tidb-controller-manager"})
@@ -90,29 +91,27 @@ func NewController(
pvInformer := kubeInformerFactory.Core().V1().PersistentVolumes()
podInformer := kubeInformerFactory.Core().V1().Pods()
nodeInformer := kubeInformerFactory.Core().V1().Nodes()
- csrInformer := kubeInformerFactory.Certificates().V1beta1().CertificateSigningRequests()
- secretInformer := kubeInformerFactory.Core().V1().Secrets()
- cmInformer := kubeInformerFactory.Core().V1().ConfigMaps()
tcControl := controller.NewRealTidbClusterControl(cli, tcInformer.Lister(), recorder)
pdControl := pdapi.NewDefaultPDControl(kubeCli)
- tidbControl := controller.NewDefaultTiDBControl()
- cmControl := controller.NewRealConfigMapControl(kubeCli, cmInformer.Lister(), recorder)
+ tidbControl := controller.NewDefaultTiDBControl(kubeCli)
+ cmControl := controller.NewRealConfigMapControl(kubeCli, recorder)
setControl := controller.NewRealStatefuSetControl(kubeCli, setInformer.Lister(), recorder)
svcControl := controller.NewRealServiceControl(kubeCli, svcInformer.Lister(), recorder)
pvControl := controller.NewRealPVControl(kubeCli, pvcInformer.Lister(), pvInformer.Lister(), recorder)
pvcControl := controller.NewRealPVCControl(kubeCli, recorder, pvcInformer.Lister())
podControl := controller.NewRealPodControl(kubeCli, pdControl, podInformer.Lister(), recorder)
- secControl := controller.NewRealSecretControl(kubeCli, secretInformer.Lister())
- certControl := controller.NewRealCertControl(kubeCli, csrInformer.Lister(), secControl)
typedControl := controller.NewTypedControl(controller.NewRealGenericControl(genericCli, recorder))
pdScaler := mm.NewPDScaler(pdControl, pvcInformer.Lister(), pvcControl)
tikvScaler := mm.NewTiKVScaler(pdControl, pvcInformer.Lister(), pvcControl, podInformer.Lister())
+ tiflashScaler := mm.NewTiFlashScaler(pdControl, pvcInformer.Lister(), pvcControl, podInformer.Lister())
pdFailover := mm.NewPDFailover(cli, pdControl, pdFailoverPeriod, podInformer.Lister(), podControl, pvcInformer.Lister(), pvcControl, pvInformer.Lister(), recorder)
- tikvFailover := mm.NewTiKVFailover(tikvFailoverPeriod)
- tidbFailover := mm.NewTiDBFailover(tidbFailoverPeriod)
+ tikvFailover := mm.NewTiKVFailover(tikvFailoverPeriod, recorder)
+ tidbFailover := mm.NewTiDBFailover(tidbFailoverPeriod, recorder)
+ tiflashFailover := mm.NewTiFlashFailover(tiflashFailoverPeriod)
pdUpgrader := mm.NewPDUpgrader(pdControl, podControl, podInformer.Lister())
tikvUpgrader := mm.NewTiKVUpgrader(pdControl, podControl, podInformer.Lister())
+ tiflashUpgrader := mm.NewTiFlashUpgrader(pdControl, podControl, podInformer.Lister())
tidbUpgrader := mm.NewTiDBUpgrader(tidbControl, podInformer.Lister())
podRestarter := mm.NewPodRestarter(kubeCli, podInformer.Lister())
@@ -126,7 +125,6 @@ func NewController(
setControl,
svcControl,
podControl,
- certControl,
typedControl,
setInformer.Lister(),
svcInformer.Lister(),
@@ -142,7 +140,6 @@ func NewController(
pdControl,
setControl,
svcControl,
- certControl,
typedControl,
setInformer.Lister(),
svcInformer.Lister(),
@@ -157,12 +154,10 @@ func NewController(
setControl,
svcControl,
tidbControl,
- certControl,
typedControl,
setInformer.Lister(),
svcInformer.Lister(),
podInformer.Lister(),
- cmInformer.Lister(),
tidbUpgrader,
autoFailover,
tidbFailover,
@@ -201,9 +196,22 @@ func NewController(
cmControl,
setInformer.Lister(),
svcInformer.Lister(),
- cmInformer.Lister(),
podInformer.Lister(),
),
+ mm.NewTiFlashMemberManager(
+ pdControl,
+ setControl,
+ svcControl,
+ typedControl,
+ setInformer.Lister(),
+ svcInformer.Lister(),
+ podInformer.Lister(),
+ nodeInformer.Lister(),
+ autoFailover,
+ tiflashFailover,
+ tiflashScaler,
+ tiflashUpgrader,
+ ),
mm.NewTidbDiscoveryManager(typedControl),
podRestarter,
recorder,
@@ -242,8 +250,8 @@ func (tcc *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer tcc.queue.ShutDown()
- glog.Info("Starting tidbcluster controller")
- defer glog.Info("Shutting down tidbcluster controller")
+ klog.Info("Starting tidbcluster controller")
+ defer klog.Info("Shutting down tidbcluster controller")
for i := 0; i < workers; i++ {
go wait.Until(tcc.worker, time.Second, stopCh)
@@ -269,7 +277,7 @@ func (tcc *Controller) processNextWorkItem() bool {
defer tcc.queue.Done(key)
if err := tcc.sync(key.(string)); err != nil {
if perrors.Find(err, controller.IsRequeueError) != nil {
- glog.Infof("TidbCluster: %v, still need sync: %v, requeuing", key.(string), err)
+ klog.Infof("TidbCluster: %v, still need sync: %v, requeuing", key.(string), err)
} else {
utilruntime.HandleError(fmt.Errorf("TidbCluster: %v, sync failed %v, requeuing", key.(string), err))
}
@@ -284,7 +292,7 @@ func (tcc *Controller) processNextWorkItem() bool {
func (tcc *Controller) sync(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing TidbCluster %q (%v)", key, time.Since(startTime))
+ klog.V(4).Infof("Finished syncing TidbCluster %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
@@ -293,7 +301,7 @@ func (tcc *Controller) sync(key string) error {
}
tc, err := tcc.tcLister.TidbClusters(ns).Get(name)
if errors.IsNotFound(err) {
- glog.Infof("TidbCluster has been deleted %v", key)
+ klog.Infof("TidbCluster has been deleted %v", key)
return nil
}
if err != nil {
@@ -335,7 +343,7 @@ func (tcc *Controller) addStatefulSet(obj interface{}) {
if tc == nil {
return
}
- glog.V(4).Infof("StatefuSet %s/%s created, TidbCluster: %s/%s", ns, setName, ns, tc.Name)
+ klog.V(4).Infof("StatefuSet %s/%s created, TidbCluster: %s/%s", ns, setName, ns, tc.Name)
tcc.enqueueTidbCluster(tc)
}
@@ -356,7 +364,7 @@ func (tcc *Controller) updateStatefuSet(old, cur interface{}) {
if tc == nil {
return
}
- glog.V(4).Infof("StatefulSet %s/%s updated, %+v -> %+v.", ns, setName, oldSet.Spec, curSet.Spec)
+ klog.V(4).Infof("StatefulSet %s/%s updated, %+v -> %+v.", ns, setName, oldSet.Spec, curSet.Spec)
tcc.enqueueTidbCluster(tc)
}
@@ -387,7 +395,7 @@ func (tcc *Controller) deleteStatefulSet(obj interface{}) {
if tc == nil {
return
}
- glog.V(4).Infof("StatefulSet %s/%s deleted through %v.", ns, setName, utilruntime.GetCaller())
+ klog.V(4).Infof("StatefulSet %s/%s deleted through %v.", ns, setName, utilruntime.GetCaller())
tcc.enqueueTidbCluster(tc)
}
diff --git a/pkg/controller/tidbcluster/tidb_cluster_controller_test.go b/pkg/controller/tidbcluster/tidb_cluster_controller_test.go
index f666195dd9..a3639023be 100644
--- a/pkg/controller/tidbcluster/tidb_cluster_controller_test.go
+++ b/pkg/controller/tidbcluster/tidb_cluster_controller_test.go
@@ -281,6 +281,7 @@ func newFakeTidbClusterController() (*Controller, cache.Indexer, *FakeTidbCluste
5*time.Minute,
5*time.Minute,
5*time.Minute,
+ 5*time.Minute,
)
tcc.tcListerSynced = alwaysReady
tcc.setListerSynced = alwaysReady
diff --git a/pkg/controller/tidbcluster_control.go b/pkg/controller/tidbcluster_control.go
index 268f7769ea..2f5f7827a4 100644
--- a/pkg/controller/tidbcluster_control.go
+++ b/pkg/controller/tidbcluster_control.go
@@ -28,7 +28,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// TidbClusterControlInterface manages TidbClusters
@@ -65,10 +65,10 @@ func (rtc *realTidbClusterControl) UpdateTidbCluster(tc *v1alpha1.TidbCluster, n
var updateErr error
updateTC, updateErr = rtc.cli.PingcapV1alpha1().TidbClusters(ns).Update(tc)
if updateErr == nil {
- glog.Infof("TidbCluster: [%s/%s] updated successfully", ns, tcName)
+ klog.Infof("TidbCluster: [%s/%s] updated successfully", ns, tcName)
return nil
}
- glog.Errorf("failed to update TidbCluster: [%s/%s], error: %v", ns, tcName, updateErr)
+ klog.Errorf("failed to update TidbCluster: [%s/%s], error: %v", ns, tcName, updateErr)
if updated, err := rtc.tcLister.TidbClusters(ns).Get(tcName); err == nil {
// make a copy so we don't mutate the shared cache
diff --git a/pkg/controller/tidbinitializer/tidb_initializer_controller.go b/pkg/controller/tidbinitializer/tidb_initializer_controller.go
index b7ed97b847..58becb489d 100644
--- a/pkg/controller/tidbinitializer/tidb_initializer_controller.go
+++ b/pkg/controller/tidbinitializer/tidb_initializer_controller.go
@@ -64,6 +64,7 @@ func NewController(
recorder := eventBroadcaster.NewRecorder(v1alpha1.Scheme, corev1.EventSource{Component: "tidbinitializer"})
tidbInitializerInformer := informerFactory.Pingcap().V1alpha1().TidbInitializers()
+ tidbClusterInformer := informerFactory.Pingcap().V1alpha1().TidbClusters()
jobInformer := kubeInformerFactory.Batch().V1().Jobs()
typedControl := controller.NewTypedControl(controller.NewRealGenericControl(genericCli, recorder))
@@ -75,6 +76,7 @@ func NewController(
jobInformer.Lister(),
genericCli,
tidbInitializerInformer.Lister(),
+ tidbClusterInformer.Lister(),
typedControl,
),
),
diff --git a/pkg/discovery/discovery.go b/pkg/discovery/discovery.go
index b06ecab75d..f8f6c21ab7 100644
--- a/pkg/discovery/discovery.go
+++ b/pkg/discovery/discovery.go
@@ -21,11 +21,10 @@ import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
- "github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/pdapi"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// TiDBDiscovery helps new PD member to discover all other members in cluster bootstrap phase.
@@ -34,12 +33,11 @@ type TiDBDiscovery interface {
}
type tidbDiscovery struct {
- cli versioned.Interface
- certControl controller.CertControlInterface
- lock sync.Mutex
- clusters map[string]*clusterInfo
- tcGetFn func(ns, tcName string) (*v1alpha1.TidbCluster, error)
- pdControl pdapi.PDControlInterface
+ cli versioned.Interface
+ lock sync.Mutex
+ clusters map[string]*clusterInfo
+ tcGetFn func(ns, tcName string) (*v1alpha1.TidbCluster, error)
+ pdControl pdapi.PDControlInterface
}
type clusterInfo struct {
@@ -65,7 +63,7 @@ func (td *tidbDiscovery) Discover(advertisePeerUrl string) (string, error) {
if advertisePeerUrl == "" {
return "", fmt.Errorf("advertisePeerUrl is empty")
}
- glog.Infof("advertisePeerUrl is: %s", advertisePeerUrl)
+ klog.Infof("advertisePeerUrl is: %s", advertisePeerUrl)
strArr := strings.Split(advertisePeerUrl, ".")
if len(strArr) != 4 {
return "", fmt.Errorf("advertisePeerUrl format is wrong: %s", advertisePeerUrl)
diff --git a/pkg/discovery/server/mux.go b/pkg/discovery/server/mux.go
index e52f60084e..a929ce559e 100644
--- a/pkg/discovery/server/mux.go
+++ b/pkg/discovery/server/mux.go
@@ -23,7 +23,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
"github.com/pingcap/tidb-operator/pkg/discovery"
"k8s.io/client-go/kubernetes"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
type server struct {
@@ -38,17 +38,17 @@ func StartServer(cli versioned.Interface, kubeCli kubernetes.Interface, port int
ws.Route(ws.GET("/new/{advertise-peer-url}").To(svr.newHandler))
restful.Add(ws)
- glog.Infof("starting TiDB Discovery server, listening on 0.0.0.0:%d", port)
- glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
+ klog.Infof("starting TiDB Discovery server, listening on 0.0.0.0:%d", port)
+ klog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
}
func (svr *server) newHandler(req *restful.Request, resp *restful.Response) {
encodedAdvertisePeerURL := req.PathParameter("advertise-peer-url")
data, err := base64.StdEncoding.DecodeString(encodedAdvertisePeerURL)
if err != nil {
- glog.Errorf("failed to decode advertise-peer-url: %s", encodedAdvertisePeerURL)
+ klog.Errorf("failed to decode advertise-peer-url: %s", encodedAdvertisePeerURL)
if err := resp.WriteError(http.StatusInternalServerError, err); err != nil {
- glog.Errorf("failed to writeError: %v", err)
+ klog.Errorf("failed to writeError: %v", err)
}
return
}
@@ -56,15 +56,15 @@ func (svr *server) newHandler(req *restful.Request, resp *restful.Response) {
result, err := svr.discovery.Discover(advertisePeerURL)
if err != nil {
- glog.Errorf("failed to discover: %s, %v", advertisePeerURL, err)
+ klog.Errorf("failed to discover: %s, %v", advertisePeerURL, err)
if err := resp.WriteError(http.StatusInternalServerError, err); err != nil {
- glog.Errorf("failed to writeError: %v", err)
+ klog.Errorf("failed to writeError: %v", err)
}
return
}
- glog.Infof("generated args for %s: %s", advertisePeerURL, result)
+ klog.Infof("generated args for %s: %s", advertisePeerURL, result)
if _, err := io.WriteString(resp, result); err != nil {
- glog.Errorf("failed to writeString: %s, %v", result, err)
+ klog.Errorf("failed to writeString: %s, %v", result, err)
}
}
diff --git a/pkg/httputil/httputil.go b/pkg/httputil/httputil.go
index e884746f92..7e1b303add 100644
--- a/pkg/httputil/httputil.go
+++ b/pkg/httputil/httputil.go
@@ -19,7 +19,7 @@ import (
"io/ioutil"
"net/http"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -32,7 +32,7 @@ const (
// This is designed to be used in a defer statement.
func DeferClose(c io.Closer) {
if err := c.Close(); err != nil {
- glog.Error(err)
+ klog.Error(err)
}
}
diff --git a/pkg/label/label.go b/pkg/label/label.go
index e51bc3f6f8..472dc70810 100644
--- a/pkg/label/label.go
+++ b/pkg/label/label.go
@@ -84,6 +84,8 @@ const (
AnnEvictLeaderBeginTime = "tidb.pingcap.com/evictLeaderBeginTime"
// AnnPodDeferDeleting is pod annotation key to indicate the pod which need to be restarted
AnnPodDeferDeleting = "tidb.pingcap.com/pod-defer-deleting"
+ // AnnStsLastSyncTimestamp is the sts annotation key that records the last time the operator synced the sts
+ AnnStsLastSyncTimestamp = "tidb.pingcap.com/sync-timestamp"
// AnnForceUpgradeVal is tc annotation value to indicate whether force upgrade should be done
AnnForceUpgradeVal = "true"
@@ -102,12 +104,34 @@ const (
// AnnTiKVLastAutoScalingTimestamp is annotation key of tidbclusterto which ordinal is created by tikv auto-scaling
AnnTiKVLastAutoScalingTimestamp = "tikv.tidb.pingcap.com/last-autoscaling-timestamp"
+ // AnnTiDBConsecutiveScaleOutCount describes the minimum consecutive count required before scaling out tidb
+ AnnTiDBConsecutiveScaleOutCount = "tidb.tidb.pingcap.com/consecutive-scale-out-count"
+ // AnnTiDBConsecutiveScaleInCount describes the minimum consecutive count required before scaling in tidb
+ AnnTiDBConsecutiveScaleInCount = "tidb.tidb.pingcap.com/consecutive-scale-in-count"
+ // AnnTiKVConsecutiveScaleOutCount describes the minimum consecutive count required before scaling out tikv
+ AnnTiKVConsecutiveScaleOutCount = "tikv.tidb.pingcap.com/consecutive-scale-out-count"
+ // AnnTiKVConsecutiveScaleInCount describes the minimum consecutive count required before scaling in tikv
+ AnnTiKVConsecutiveScaleInCount = "tikv.tidb.pingcap.com/consecutive-scale-in-count"
+ // AnnAutoScalingTargetName describes the name of the target TidbCluster for the TidbClusterAutoScaler
+ AnnAutoScalingTargetName = "auto-scaling.tidb.pingcap.com/target-name"
+ // AnnAutoScalingTargetNamespace describes the namespace of the target TidbCluster for the TidbClusterAutoScaler
+ AnnAutoScalingTargetNamespace = "auto-scaling.tidb.pingcap.com/target-namespace"
+ // AnnTiKVAutoScalingOutOrdinals describes the ordinal list of the tikv pods created by auto-scaling out
+ AnnTiKVAutoScalingOutOrdinals = "tikv.tidb.pingcap.com/scale-out-ordinals"
+ // AnnTiDBAutoScalingOutOrdinals describes the ordinal list of the tidb pods created by auto-scaling out
+ AnnTiDBAutoScalingOutOrdinals = "tidb.tidb.pingcap.com/scale-out-ordinals"
+
+ // AnnSkipTLSWhenConnectTiDB describes whether to skip TLS when connecting to the TiDB server
+ AnnSkipTLSWhenConnectTiDB = "tidb.tidb.pingcap.com/skip-tls-when-connect-tidb"
+
// PDLabelVal is PD label value
PDLabelVal string = "pd"
// TiDBLabelVal is TiDB label value
TiDBLabelVal string = "tidb"
// TiKVLabelVal is TiKV label value
TiKVLabelVal string = "tikv"
+ // TiFlashLabelVal is TiFlash label value
+ TiFlashLabelVal string = "tiflash"
// PumpLabelVal is Pump label value
PumpLabelVal string = "pump"
// DiscoveryLabelVal is Discovery label value
@@ -172,10 +196,10 @@ func NewBackupSchedule() Label {
}
}
-// NewMonitor initialize a new label for monitor of tidb-monitor
+// NewMonitor initializes a new label set for the tidb-monitor component
func NewMonitor() Label {
return Label{
- NameLabelKey: TiDBMonitorVal,
+ // NameLabelKey is set to "tidb-cluster" to stay compatible with the Helm-deployed monitor
+ NameLabelKey: "tidb-cluster",
ManagedByLabelKey: TiDBOperator,
}
}
@@ -285,6 +309,12 @@ func (l Label) TiKV() Label {
return l
}
+// TiFlash assigns tiflash to component key in label
+func (l Label) TiFlash() Label {
+ l.Component(TiFlashLabelVal)
+ return l
+}
+
// IsTiKV returns whether label is a TiKV
func (l Label) IsTiKV() bool {
return l[ComponentLabelKey] == TiKVLabelVal
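
A small sketch of how the new TiFlash helper composes with the Label map type shown above; the package path and the exported ComponentLabelKey are assumed from the surrounding code, everything else is illustrative.

package example

import "github.com/pingcap/tidb-operator/pkg/label"

// tiflashLabels stamps the tiflash component onto an existing label set;
// Label is a map type, so the call mutates the receiver and returns it for
// chaining, mirroring the existing TiKV helper.
func tiflashLabels(l label.Label) label.Label {
	return l.TiFlash()
}

// isTiFlash mirrors IsTiKV for the new component value.
func isTiFlash(l label.Label) bool {
	return l[label.ComponentLabelKey] == label.TiFlashLabelVal
}
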
diff --git a/pkg/manager/member/failover.go b/pkg/manager/member/failover.go
index 7744ee7a80..7a71f1015e 100644
--- a/pkg/manager/member/failover.go
+++ b/pkg/manager/member/failover.go
@@ -15,6 +15,11 @@ package member
import "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+const (
+ unHealthEventReason = "Unhealthy"
+ unHealthEventMsgPattern = "%s pod[%s] is unhealthy, msg:%s"
+)
+
// Failover implements the logic for pd/tikv/tidb's failover and recovery.
type Failover interface {
Failover(*v1alpha1.TidbCluster) error
diff --git a/pkg/manager/member/orphan_pods_cleaner.go b/pkg/manager/member/orphan_pods_cleaner.go
index e961ca5d49..14aa5687a2 100644
--- a/pkg/manager/member/orphan_pods_cleaner.go
+++ b/pkg/manager/member/orphan_pods_cleaner.go
@@ -17,21 +17,20 @@ import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
- v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
- skipReasonOrphanPodsCleanerIsNotPDOrTiKV = "orphan pods cleaner: member type is not pd or tikv"
- skipReasonOrphanPodsCleanerPVCNameIsEmpty = "orphan pods cleaner: pvcName is empty"
- skipReasonOrphanPodsCleanerPVCIsFound = "orphan pods cleaner: pvc is found"
- skipReasonOrphanPodsCleanerPodIsNotPending = "orphan pods cleaner: pod is not pending"
- skipReasonOrphanPodsCleanerPodIsNotFound = "orphan pods cleaner: pod does not exist anymore"
- skipReasonOrphanPodsCleanerPodChanged = "orphan pods cleaner: pod changed before deletion"
+ skipReasonOrphanPodsCleanerIsNotPDOrTiKV = "orphan pods cleaner: member type is not pd or tikv"
+ skipReasonOrphanPodsCleanerPVCNameIsEmpty = "orphan pods cleaner: pvcName is empty"
+ skipReasonOrphanPodsCleanerPVCIsFound = "orphan pods cleaner: pvc is found"
+ skipReasonOrphanPodsCleanerPodHasBeenScheduled = "orphan pods cleaner: pod has been scheduled"
+ skipReasonOrphanPodsCleanerPodIsNotFound = "orphan pods cleaner: pod does not exist anymore"
+ skipReasonOrphanPodsCleanerPodChanged = "orphan pods cleaner: pod changed before deletion"
)
// OrphanPodsCleaner implements the logic for cleaning the orphan pods(has no pvc)
@@ -88,8 +87,8 @@ func (opc *orphanPodsCleaner) Clean(tc *v1alpha1.TidbCluster) (map[string]string
continue
}
- if pod.Status.Phase != v1.PodPending {
- skipReason[podName] = skipReasonOrphanPodsCleanerPodIsNotPending
+ if len(pod.Spec.NodeName) > 0 {
+ skipReason[podName] = skipReasonOrphanPodsCleanerPodHasBeenScheduled
continue
}
@@ -128,7 +127,7 @@ func (opc *orphanPodsCleaner) Clean(tc *v1alpha1.TidbCluster) (map[string]string
}
// if the PVC is not found in apiserver (also informer cache) and the
- // phase of the Pod is Pending, delete it and let the stateful
+ // pod has not been scheduled, delete it and let the stateful
// controller to create the pod and its PVC(s) again
apiPod, err := opc.kubeCli.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
@@ -138,18 +137,19 @@ func (opc *orphanPodsCleaner) Clean(tc *v1alpha1.TidbCluster) (map[string]string
if err != nil {
return skipReason, err
}
- // try our best to avoid deleting wrong object in apiserver
- // TODO upgrade to use deleteOption.Preconditions.ResourceVersion in client-go 1.14+
+ // In releases before 1.14, kube-apiserver does not support
+ // deleteOption.Preconditions.ResourceVersion, so we try our best to avoid
+ // deleting the wrong object in the apiserver.
if apiPod.UID != pod.UID || apiPod.ResourceVersion != pod.ResourceVersion {
skipReason[podName] = skipReasonOrphanPodsCleanerPodChanged
continue
}
err = opc.podControl.DeletePod(tc, pod)
if err != nil {
- glog.Errorf("orphan pods cleaner: failed to clean orphan pod: %s/%s, %v", ns, podName, err)
+ klog.Errorf("orphan pods cleaner: failed to clean orphan pod: %s/%s, %v", ns, podName, err)
return skipReason, err
}
- glog.Infof("orphan pods cleaner: clean orphan pod: %s/%s successfully", ns, podName)
+ klog.Infof("orphan pods cleaner: clean orphan pod: %s/%s successfully", ns, podName)
}
return skipReason, nil
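
The guard above now keys off scheduling rather than pod phase; a minimal standalone sketch of the new condition (types from k8s.io/api, the function name is illustrative):

package example

import corev1 "k8s.io/api/core/v1"

// isOrphanCandidate captures the new check: only pods that have not been
// bound to a node yet (empty spec.nodeName) are considered safe to delete,
// replacing the previous check on the Pending phase.
func isOrphanCandidate(pod *corev1.Pod) bool {
	return len(pod.Spec.NodeName) == 0
}
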
diff --git a/pkg/manager/member/orphan_pods_cleaner_test.go b/pkg/manager/member/orphan_pods_cleaner_test.go
index d7bc6251d3..f954b7c3d8 100644
--- a/pkg/manager/member/orphan_pods_cleaner_test.go
+++ b/pkg/manager/member/orphan_pods_cleaner_test.go
@@ -227,7 +227,9 @@ func TestOrphanPodsCleanerClean(t *testing.T) {
},
},
{
- name: "pvc is not found but pod is not pending",
+ // in theory, this is possible because we can't check the PVC
+ // and pod in an atomic operation.
+ name: "pvc is not found but pod has been scheduled",
pods: []*corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@@ -246,9 +248,7 @@ func TestOrphanPodsCleanerClean(t *testing.T) {
},
},
},
- },
- Status: corev1.PodStatus{
- Phase: corev1.PodRunning,
+ NodeName: "foobar",
},
},
},
@@ -256,7 +256,7 @@ func TestOrphanPodsCleanerClean(t *testing.T) {
expectFn: func(g *GomegaWithT, skipReason map[string]string, opc *orphanPodsCleaner, err error) {
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(skipReason)).To(Equal(1))
- g.Expect(skipReason["pod-1"]).To(Equal(skipReasonOrphanPodsCleanerPodIsNotPending))
+ g.Expect(skipReason["pod-1"]).To(Equal(skipReasonOrphanPodsCleanerPodHasBeenScheduled))
},
},
{
diff --git a/pkg/manager/member/pd_failover.go b/pkg/manager/member/pd_failover.go
index 449474d28a..b5ba116218 100644
--- a/pkg/manager/member/pd_failover.go
+++ b/pkg/manager/member/pd_failover.go
@@ -28,10 +28,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/record"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
-// TODO add maxFailoverCount
type pdFailover struct {
cli versioned.Interface
pdControl pdapi.PDControlInterface
@@ -93,6 +92,12 @@ func (pf *pdFailover) Failover(tc *v1alpha1.TidbCluster) error {
ns, tcName, healthCount, tc.PDStsDesiredReplicas(), tc.Spec.PD.Replicas, len(tc.Status.PD.FailureMembers))
}
+ failureReplicas := getFailureReplicas(tc)
+ if failureReplicas >= int(*tc.Spec.PD.MaxFailoverCount) {
+ klog.Errorf("PD failover replicas (%d) reaches the limit (%d), skip failover", failureReplicas, *tc.Spec.PD.MaxFailoverCount)
+ return nil
+ }
+
notDeletedCount := 0
for _, pdMember := range tc.Status.PD.FailureMembers {
if !pdMember.MemberDeleted {
@@ -109,7 +114,7 @@ func (pf *pdFailover) Failover(tc *v1alpha1.TidbCluster) error {
func (pf *pdFailover) Recover(tc *v1alpha1.TidbCluster) {
tc.Status.PD.FailureMembers = nil
- glog.Infof("pd failover: clearing pd failoverMembers, %s/%s", tc.GetNamespace(), tc.GetName())
+ klog.Infof("pd failover: clearing pd failoverMembers, %s/%s", tc.GetNamespace(), tc.GetName())
}
func (pf *pdFailover) tryToMarkAPeerAsFailure(tc *v1alpha1.TidbCluster) error {
@@ -140,8 +145,8 @@ func (pf *pdFailover) tryToMarkAPeerAsFailure(tc *v1alpha1.TidbCluster) error {
return err
}
- pf.recorder.Eventf(tc, apiv1.EventTypeWarning, "PDMemberMarkedAsFailure",
- "%s(%s) marked as a failure member", podName, pdMember.ID)
+ msg := fmt.Sprintf("pd member[%s] is unhealthy", pdMember.ID)
+ pf.recorder.Event(tc, apiv1.EventTypeWarning, unHealthEventReason, fmt.Sprintf(unHealthEventMsgPattern, "pd", podName, msg))
tc.Status.PD.FailureMembers[podName] = v1alpha1.PDFailureMember{
PodName: podName,
@@ -180,10 +185,10 @@ func (pf *pdFailover) tryToDeleteAFailureMember(tc *v1alpha1.TidbCluster) error
// invoke deleteMember api to delete a member from the pd cluster
err = controller.GetPDClient(pf.pdControl, tc).DeleteMemberByID(memberID)
if err != nil {
- glog.Errorf("pd failover: failed to delete member: %d, %v", memberID, err)
+ klog.Errorf("pd failover: failed to delete member: %d, %v", memberID, err)
return err
}
- glog.Infof("pd failover: delete member: %d successfully", memberID)
+ klog.Infof("pd failover: delete member: %d successfully", memberID)
pf.recorder.Eventf(tc, apiv1.EventTypeWarning, "PDMemberDeleted",
"%s(%d) deleted from cluster", failurePodName, memberID)
@@ -215,10 +220,10 @@ func (pf *pdFailover) tryToDeleteAFailureMember(tc *v1alpha1.TidbCluster) error
if pvc != nil && pvc.DeletionTimestamp == nil && pvc.GetUID() == failureMember.PVCUID {
err = pf.pvcControl.DeletePVC(tc, pvc)
if err != nil {
- glog.Errorf("pd failover: failed to delete pvc: %s/%s, %v", ns, pvcName, err)
+ klog.Errorf("pd failover: failed to delete pvc: %s/%s, %v", ns, pvcName, err)
return err
}
- glog.Infof("pd failover: pvc: %s/%s successfully", ns, pvcName)
+ klog.Infof("pd failover: pvc: %s/%s successfully", ns, pvcName)
}
setMemberDeleted(tc, failurePodName)
@@ -229,7 +234,7 @@ func setMemberDeleted(tc *v1alpha1.TidbCluster, podName string) {
failureMember := tc.Status.PD.FailureMembers[podName]
failureMember.MemberDeleted = true
tc.Status.PD.FailureMembers[podName] = failureMember
- glog.Infof("pd failover: set pd member: %s/%s deleted", tc.GetName(), podName)
+ klog.Infof("pd failover: set pd member: %s/%s deleted", tc.GetName(), podName)
}
type fakePDFailover struct{}
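
A hedged sketch of the new maxFailoverCount gate introduced above, using a simplified map in place of the PDFailureMember status (the helper names here are illustrative, not part of the patch):

package example

// deletedFailureMembers mirrors getFailureReplicas: only failure members whose
// pods have already been deleted count toward spec.pd.maxFailoverCount.
func deletedFailureMembers(memberDeleted map[string]bool) int {
	n := 0
	for _, deleted := range memberDeleted {
		if deleted {
			n++
		}
	}
	return n
}

// shouldFailover sketches the new gate: once the count reaches the limit,
// failover is skipped (the condition is logged but no error is returned).
func shouldFailover(memberDeleted map[string]bool, maxFailoverCount int) bool {
	return deletedFailureMembers(memberDeleted) < maxFailoverCount
}
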
diff --git a/pkg/manager/member/pd_failover_test.go b/pkg/manager/member/pd_failover_test.go
index cd9ca847d1..953589830a 100644
--- a/pkg/manager/member/pd_failover_test.go
+++ b/pkg/manager/member/pd_failover_test.go
@@ -33,6 +33,7 @@ import (
kubefake "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
+ "k8s.io/utils/pointer"
)
func TestPDFailoverFailover(t *testing.T) {
@@ -42,6 +43,7 @@ func TestPDFailoverFailover(t *testing.T) {
type testcase struct {
name string
update func(*v1alpha1.TidbCluster)
+ maxFailoverCount int32
hasPVC bool
hasPod bool
podWithDeletionTimestamp bool
@@ -53,53 +55,12 @@ func TestPDFailoverFailover(t *testing.T) {
errExpectFn func(*GomegaWithT, error)
expectFn func(*v1alpha1.TidbCluster, *pdFailover)
}
- testFn := func(test *testcase, t *testing.T) {
- t.Log(test.name)
- tc := newTidbClusterForPD()
- test.update(tc)
-
- pdFailover, pvcIndexer, podIndexer, fakePDControl, fakePodControl, fakePVCControl := newFakePDFailover()
- pdClient := controller.NewFakePDClient(fakePDControl, tc)
- pdFailover.recorder = recorder
-
- pdClient.AddReaction(pdapi.DeleteMemberByIDActionType, func(action *pdapi.Action) (interface{}, error) {
- if test.delMemberFailed {
- return nil, fmt.Errorf("failed to delete member")
- }
- return nil, nil
- })
-
- if test.hasPVC {
- pvc := newPVCForPDFailover(tc, v1alpha1.PDMemberType, 1)
- if test.pvcWithDeletionTimestamp {
- pvc.DeletionTimestamp = &metav1.Time{Time: time.Now()}
- }
- pvcIndexer.Add(pvc)
- }
- if test.hasPod {
- pod := newPodForPDFailover(tc, v1alpha1.PDMemberType, 1)
- if test.podWithDeletionTimestamp {
- pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
- }
- podIndexer.Add(pod)
- }
- if test.delPodFailed {
- fakePodControl.SetDeletePodError(errors.NewInternalError(fmt.Errorf("delete pod: API server failed")), 0)
- }
- if test.delPVCFailed {
- fakePVCControl.SetDeletePVCError(errors.NewInternalError(fmt.Errorf("delete pvc: API server failed")), 0)
- }
- tc.Status.PD.Synced = !test.statusSyncFailed
-
- err := pdFailover.Failover(tc)
- test.errExpectFn(g, err)
- test.expectFn(tc, pdFailover)
- }
tests := []testcase{
{
name: "all members are ready",
update: allMembersReady,
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -118,6 +79,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "pd status sync failed",
update: allMembersReady,
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -135,6 +97,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "two members are not ready, not in quorum",
update: twoMembersNotReady,
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -159,6 +122,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "two members are ready and a failure member",
update: oneFailureMember,
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -187,6 +151,7 @@ func TestPDFailoverFailover(t *testing.T) {
pd1.LastTransitionTime = metav1.Time{Time: time.Now().Add(-2 * time.Minute)}
tc.Status.PD.Members[pd1Name] = pd1
},
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -212,6 +177,7 @@ func TestPDFailoverFailover(t *testing.T) {
pd1.LastTransitionTime = metav1.Time{}
tc.Status.PD.Members[pd1Name] = pd1
},
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -231,6 +197,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "has one not ready member, don't have pvc",
update: oneNotReadyMember,
+ maxFailoverCount: 3,
hasPVC: false,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -253,6 +220,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "has one not ready member",
update: oneNotReadyMember,
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -275,12 +243,33 @@ func TestPDFailoverFailover(t *testing.T) {
events := collectEvents(recorder.Events)
g.Expect(events).To(HaveLen(2))
g.Expect(events[0]).To(ContainSubstring("test-pd-1(12891273174085095651) is unhealthy"))
- g.Expect(events[1]).To(ContainSubstring("test-pd-1(12891273174085095651) marked as a failure member"))
+ g.Expect(events[1]).To(ContainSubstring("Unhealthy pd pod[test-pd-1] is unhealthy, msg:pd member[12891273174085095651] is unhealthy"))
+ },
+ },
+ {
+ name: "has one not ready member but maxFailoverCount is 0",
+ update: oneNotReadyMember,
+ maxFailoverCount: 0,
+ hasPVC: true,
+ hasPod: true,
+ podWithDeletionTimestamp: false,
+ delMemberFailed: false,
+ delPodFailed: false,
+ delPVCFailed: false,
+ statusSyncFailed: false,
+ errExpectFn: errExpectNil,
+ expectFn: func(tc *v1alpha1.TidbCluster, _ *pdFailover) {
+ g.Expect(int(tc.Spec.PD.Replicas)).To(Equal(3))
+ g.Expect(len(tc.Status.PD.FailureMembers)).To(Equal(0))
+ events := collectEvents(recorder.Events)
+ g.Expect(events).To(HaveLen(1))
+ g.Expect(events[0]).To(ContainSubstring("test-pd-1(12891273174085095651) is unhealthy"))
},
},
{
name: "has one not ready member, and exceed deadline, don't have PVC, has Pod, delete pod success",
update: oneNotReadyMemberAndAFailureMember,
+ maxFailoverCount: 3,
hasPVC: false,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -310,6 +299,7 @@ func TestPDFailoverFailover(t *testing.T) {
pd1.MemberID = "wrong-id"
tc.Status.PD.FailureMembers[pd1Name] = pd1
},
+ maxFailoverCount: 3,
hasPVC: false,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -335,6 +325,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "has one not ready member, and exceed deadline, don't have PVC, has Pod, delete member failed",
update: oneNotReadyMemberAndAFailureMember,
+ maxFailoverCount: 3,
hasPVC: false,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -360,6 +351,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "has one not ready member, and exceed deadline, don't have PVC, has Pod, delete pod failed",
update: oneNotReadyMemberAndAFailureMember,
+ maxFailoverCount: 3,
hasPVC: false,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -386,6 +378,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "has one not ready member, and exceed deadline, has Pod, delete pvc failed",
update: oneNotReadyMemberAndAFailureMember,
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -412,6 +405,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "has one not ready member, and exceed deadline, has Pod with deletion timestamp",
update: oneNotReadyMemberAndAFailureMember,
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: true,
@@ -441,6 +435,7 @@ func TestPDFailoverFailover(t *testing.T) {
{
name: "has one not ready member, and exceed deadline, has PVC with deletion timestamp",
update: oneNotReadyMemberAndAFailureMember,
+ maxFailoverCount: 3,
hasPVC: true,
hasPod: true,
podWithDeletionTimestamp: false,
@@ -470,8 +465,50 @@ func TestPDFailoverFailover(t *testing.T) {
},
}
- for i := range tests {
- testFn(&tests[i], t)
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ tc := newTidbClusterForPD()
+ tc.Spec.PD.MaxFailoverCount = pointer.Int32Ptr(test.maxFailoverCount)
+ test.update(tc)
+
+ pdFailover, pvcIndexer, podIndexer, fakePDControl, fakePodControl, fakePVCControl := newFakePDFailover()
+ pdClient := controller.NewFakePDClient(fakePDControl, tc)
+ pdFailover.recorder = recorder
+
+ pdClient.AddReaction(pdapi.DeleteMemberByIDActionType, func(action *pdapi.Action) (interface{}, error) {
+ if test.delMemberFailed {
+ return nil, fmt.Errorf("failed to delete member")
+ }
+ return nil, nil
+ })
+
+ if test.hasPVC {
+ pvc := newPVCForPDFailover(tc, v1alpha1.PDMemberType, 1)
+ if test.pvcWithDeletionTimestamp {
+ pvc.DeletionTimestamp = &metav1.Time{Time: time.Now()}
+ }
+ pvcIndexer.Add(pvc)
+ }
+ if test.hasPod {
+ pod := newPodForPDFailover(tc, v1alpha1.PDMemberType, 1)
+ if test.podWithDeletionTimestamp {
+ pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
+ }
+ podIndexer.Add(pod)
+ }
+ if test.delPodFailed {
+ fakePodControl.SetDeletePodError(errors.NewInternalError(fmt.Errorf("delete pod: API server failed")), 0)
+ }
+ if test.delPVCFailed {
+ fakePVCControl.SetDeletePVCError(errors.NewInternalError(fmt.Errorf("delete pvc: API server failed")), 0)
+ }
+
+ tc.Status.PD.Synced = !test.statusSyncFailed
+
+ err := pdFailover.Failover(tc)
+ test.errExpectFn(g, err)
+ test.expectFn(tc, pdFailover)
+ })
}
}
diff --git a/pkg/manager/member/pd_member_manager.go b/pkg/manager/member/pd_member_manager.go
index 603863edcd..fad42640d9 100644
--- a/pkg/manager/member/pd_member_manager.go
+++ b/pkg/manager/member/pd_member_manager.go
@@ -15,6 +15,7 @@ package member
import (
"fmt"
+ "path"
"strconv"
"strings"
@@ -32,7 +33,13 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
v1 "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
+)
+
+const (
+ // pdClusterCertPath is where the cert for inter-cluster communication is stored (if any)
+ pdClusterCertPath = "/var/lib/pd-tls"
+ tidbClientCertPath = "/var/lib/tidb-client-tls"
)
type pdMemberManager struct {
@@ -40,7 +47,6 @@ type pdMemberManager struct {
setControl controller.StatefulSetControlInterface
svcControl controller.ServiceControlInterface
podControl controller.PodControlInterface
- certControl controller.CertControlInterface
typedControl controller.TypedControlInterface
setLister v1.StatefulSetLister
svcLister corelisters.ServiceLister
@@ -58,7 +64,6 @@ func NewPDMemberManager(pdControl pdapi.PDControlInterface,
setControl controller.StatefulSetControlInterface,
svcControl controller.ServiceControlInterface,
podControl controller.PodControlInterface,
- certControl controller.CertControlInterface,
typedControl controller.TypedControlInterface,
setLister v1.StatefulSetLister,
svcLister corelisters.ServiceLister,
@@ -74,7 +79,6 @@ func NewPDMemberManager(pdControl pdapi.PDControlInterface,
setControl,
svcControl,
podControl,
- certControl,
typedControl,
setLister,
svcLister,
@@ -103,6 +107,11 @@ func (pmm *pdMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
}
func (pmm *pdMemberManager) syncPDServiceForTidbCluster(tc *v1alpha1.TidbCluster) error {
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tidb cluster %s/%s is paused, skip syncing for pd service", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
ns := tc.GetNamespace()
tcName := tc.GetName()
@@ -142,6 +151,11 @@ func (pmm *pdMemberManager) syncPDServiceForTidbCluster(tc *v1alpha1.TidbCluster
}
func (pmm *pdMemberManager) syncPDHeadlessServiceForTidbCluster(tc *v1alpha1.TidbCluster) error {
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tidb cluster %s/%s is paused, skip syncing for pd headless service", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
ns := tc.GetNamespace()
tcName := tc.GetName()
@@ -187,6 +201,16 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu
setNotExist := errors.IsNotFound(err)
oldPDSet := oldPDSetTmp.DeepCopy()
+
+ if err := pmm.syncTidbClusterStatus(tc, oldPDSet); err != nil {
+ klog.Errorf("failed to sync TidbCluster: [%s/%s]'s status, error: %v", ns, tcName, err)
+ }
+
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tidb cluster %s/%s is paused, skip syncing for pd statefulset", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
cm, err := pmm.syncPDConfigMap(tc, oldPDSet)
if err != nil {
return err
@@ -200,16 +224,6 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu
if err != nil {
return err
}
- if tc.IsTLSClusterEnabled() {
- err := pmm.syncPDServerCerts(tc)
- if err != nil {
- return err
- }
- err = pmm.syncPDClientCerts(tc)
- if err != nil {
- return err
- }
- }
if err := pmm.setControl.CreateStatefulSet(tc, newPDSet); err != nil {
return err
}
@@ -217,10 +231,6 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu
return controller.RequeueErrorf("TidbCluster: [%s/%s], waiting for PD cluster running", ns, tcName)
}
- if err := pmm.syncTidbClusterStatus(tc, oldPDSet); err != nil {
- glog.Errorf("failed to sync TidbCluster: [%s/%s]'s status, error: %v", ns, tcName, err)
- }
-
if !tc.Status.PD.Synced {
force := NeedForceUpgrade(tc)
if force {
@@ -254,63 +264,12 @@ func (pmm *pdMemberManager) syncPDStatefulSetForTidbCluster(tc *v1alpha1.TidbClu
return updateStatefulSet(pmm.setControl, tc, newPDSet, oldPDSet)
}
-func (pmm *pdMemberManager) syncPDClientCerts(tc *v1alpha1.TidbCluster) error {
- ns := tc.GetNamespace()
- tcName := tc.GetName()
- commonName := fmt.Sprintf("%s-pd-client", tcName)
-
- hostList := []string{
- commonName,
- }
-
- certOpts := &controller.TiDBClusterCertOptions{
- Namespace: ns,
- Instance: tcName,
- CommonName: commonName,
- HostList: hostList,
- Component: "pd",
- Suffix: "pd-client",
- }
-
- return pmm.certControl.Create(controller.GetOwnerRef(tc), certOpts)
-}
-
-func (pmm *pdMemberManager) syncPDServerCerts(tc *v1alpha1.TidbCluster) error {
- ns := tc.GetNamespace()
- tcName := tc.GetName()
- svcName := controller.PDMemberName(tcName)
- peerName := controller.PDPeerMemberName(tcName)
-
- if pmm.certControl.CheckSecret(ns, svcName) {
+func (pmm *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) error {
+ if set == nil {
+ // skip if not created yet
return nil
}
- hostList := []string{
- svcName,
- peerName,
- fmt.Sprintf("%s.%s", svcName, ns),
- fmt.Sprintf("%s.%s", peerName, ns),
- fmt.Sprintf("*.%s.%s.svc", peerName, ns),
- }
-
- ipList := []string{
- "127.0.0.1", "::1", // able to access https endpoint via loopback network
- }
-
- certOpts := &controller.TiDBClusterCertOptions{
- Namespace: ns,
- Instance: tcName,
- CommonName: svcName,
- HostList: hostList,
- IPList: ipList,
- Component: "pd",
- Suffix: "pd",
- }
-
- return pmm.certControl.Create(controller.GetOwnerRef(tc), certOpts)
-}
-
-func (pmm *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) error {
ns := tc.GetNamespace()
tcName := tc.GetName()
@@ -364,7 +323,7 @@ func (pmm *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set
}
name := memberHealth.Name
if len(name) == 0 {
- glog.Warningf("PD member: [%d] doesn't have a name, and can't get it from clientUrls: [%s], memberHealth Info: [%v] in [%s/%s]",
+ klog.Warningf("PD member: [%d] doesn't have a name, and can't get it from clientUrls: [%s], memberHealth Info: [%v] in [%s/%s]",
id, memberHealth.ClientUrls, memberHealth, ns, tcName)
continue
}
@@ -389,6 +348,11 @@ func (pmm *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set
tc.Status.PD.Synced = true
tc.Status.PD.Members = pdStatus
tc.Status.PD.Leader = tc.Status.PD.Members[leader.GetName()]
+ tc.Status.PD.Image = ""
+ c := filterContainer(set, "pd")
+ if c != nil {
+ tc.Status.PD.Image = c.Image
+ }
// k8s check
err = pmm.collectUnjoinedMembers(tc, set, pdStatus)
@@ -461,6 +425,9 @@ func (pmm *pdMemberManager) getNewPDServiceForTidbCluster(tc *v1alpha1.TidbClust
if svcSpec.ClusterIP != nil {
pdService.Spec.ClusterIP = *svcSpec.ClusterIP
}
+ if svcSpec.PortName != nil {
+ pdService.Spec.Ports[0].Name = *svcSpec.PortName
+ }
}
return pdService
}
@@ -522,6 +489,16 @@ func (pmm *pdMemberManager) pdStatefulSetIsUpgrading(set *apps.StatefulSet, tc *
return false, nil
}
+func getFailureReplicas(tc *v1alpha1.TidbCluster) int {
+ failureReplicas := 0
+ for _, failureMember := range tc.Status.PD.FailureMembers {
+ if failureMember.MemberDeleted {
+ failureReplicas++
+ }
+ }
+ return failureReplicas
+}
+
func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (*apps.StatefulSet, error) {
ns := tc.Namespace
tcName := tc.Name
@@ -544,6 +521,11 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (
Name: "pd-tls", ReadOnly: true, MountPath: "/var/lib/pd-tls",
})
}
+ if tc.Spec.TiDB.IsTLSClientEnabled() && !tc.SkipTLSWhenConnectTiDB() {
+ volMounts = append(volMounts, corev1.VolumeMount{
+ Name: "tidb-client-tls", ReadOnly: true, MountPath: tidbClientCertPath,
+ })
+ }
vols := []corev1.Volume{
annVolume,
@@ -572,7 +554,16 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (
vols = append(vols, corev1.Volume{
Name: "pd-tls", VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
- SecretName: controller.PDMemberName(tcName),
+ SecretName: util.ClusterTLSSecretName(tc.Name, label.PDLabelVal),
+ },
+ },
+ })
+ }
+ if tc.Spec.TiDB.IsTLSClientEnabled() && !tc.SkipTLSWhenConnectTiDB() {
+ vols = append(vols, corev1.Volume{
+ Name: "tidb-client-tls", VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: util.TiDBClientTLSSecretName(tc.Name),
},
},
})
@@ -587,12 +578,7 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (
setName := controller.PDMemberName(tcName)
podAnnotations := CombineAnnotations(controller.AnnProm(2379), basePDSpec.Annotations())
stsAnnotations := getStsAnnotations(tc, label.PDLabelVal)
- failureReplicas := 0
- for _, failureMember := range tc.Status.PD.FailureMembers {
- if failureMember.MemberDeleted {
- failureReplicas++
- }
- }
+ failureReplicas := getFailureReplicas(tc)
pdContainer := corev1.Container{
Name: v1alpha1.PDMemberType.String(),
@@ -653,7 +639,7 @@ func getNewPDSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (
},
})
}
- pdContainer.Env = env
+ pdContainer.Env = util.AppendEnv(env, basePDSpec.Env())
podSpec.Volumes = vols
podSpec.Containers = []corev1.Container{pdContainer}
@@ -709,6 +695,25 @@ func getPDConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) {
if config == nil {
return nil, nil
}
+
+ // override CA if tls enabled
+ if tc.IsTLSClusterEnabled() {
+ if config.Security == nil {
+ config.Security = &v1alpha1.PDSecurityConfig{}
+ }
+ config.Security.CAPath = path.Join(pdClusterCertPath, tlsSecretRootCAKey)
+ config.Security.CertPath = path.Join(pdClusterCertPath, corev1.TLSCertKey)
+ config.Security.KeyPath = path.Join(pdClusterCertPath, corev1.TLSPrivateKeyKey)
+ }
+ if tc.Spec.TiDB.IsTLSClientEnabled() && !tc.SkipTLSWhenConnectTiDB() {
+ if config.Dashboard == nil {
+ config.Dashboard = &v1alpha1.DashboardConfig{}
+ }
+ config.Dashboard.TiDBCAPath = path.Join(tidbClientCertPath, tlsSecretRootCAKey)
+ config.Dashboard.TiDBCertPath = path.Join(tidbClientCertPath, corev1.TLSCertKey)
+ config.Dashboard.TiDBKeyPath = path.Join(tidbClientCertPath, corev1.TLSPrivateKeyKey)
+ }
+
confText, err := MarshalTOML(config)
if err != nil {
return nil, err
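
For clarity, a minimal sketch of how the TLS override in getPDConfigMap above derives the security file paths from the fixed mount point; the "ca.crt" key name is an assumption standing in for tlsSecretRootCAKey, which is defined elsewhere in the package.

package example

import (
	"path"

	corev1 "k8s.io/api/core/v1"
)

// pdSecurityPaths joins the mount directory with the secret key names used for
// the CA, certificate, and private key files.
func pdSecurityPaths(certDir string) (ca, cert, key string) {
	ca = path.Join(certDir, "ca.crt")
	cert = path.Join(certDir, corev1.TLSCertKey)      // "tls.crt"
	key = path.Join(certDir, corev1.TLSPrivateKeyKey) // "tls.key"
	return
}
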
diff --git a/pkg/manager/member/pd_member_manager_test.go b/pkg/manager/member/pd_member_manager_test.go
index eeaf9acb5f..b43ed5bab0 100644
--- a/pkg/manager/member/pd_member_manager_test.go
+++ b/pkg/manager/member/pd_member_manager_test.go
@@ -746,14 +746,10 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont
epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints()
pvcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().PersistentVolumeClaims()
tcInformer := informers.NewSharedInformerFactory(cli, 0).Pingcap().V1alpha1().TidbClusters()
- csrInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Certificates().V1beta1().CertificateSigningRequests()
- secretInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Secrets()
setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer)
svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer)
podControl := controller.NewFakePodControl(podInformer)
pdControl := pdapi.NewFakePDControl(kubeCli)
- secControl := controller.NewFakeSecretControl(kubeCli, secretInformer.Lister())
- certControl := controller.NewFakeCertControl(kubeCli, csrInformer.Lister(), secControl)
pdScaler := NewFakePDScaler()
autoFailover := true
pdFailover := NewFakePDFailover()
@@ -765,7 +761,6 @@ func newFakePDMemberManager() (*pdMemberManager, *controller.FakeStatefulSetCont
setControl,
svcControl,
podControl,
- certControl,
controller.NewTypedControl(genericControll),
setInformer.Lister(),
svcInformer.Lister(),
@@ -918,6 +913,20 @@ func testAnnotations(t *testing.T, annotations map[string]string) func(sts *apps
}
}
+func testPDContainerEnv(t *testing.T, env []corev1.EnvVar) func(sts *apps.StatefulSet) {
+ return func(sts *apps.StatefulSet) {
+ got := []corev1.EnvVar{}
+ for _, c := range sts.Spec.Template.Spec.Containers {
+ if c.Name == v1alpha1.PDMemberType.String() {
+ got = c.Env
+ }
+ }
+ if diff := cmp.Diff(env, got); diff != "" {
+ t.Errorf("unexpected (-want, +got): %s", diff)
+ }
+ }
+}
+
func TestGetNewPDSetForTidbCluster(t *testing.T) {
enable := true
tests := []struct {
@@ -1035,6 +1044,74 @@ func TestGetNewPDSetForTidbCluster(t *testing.T) {
}))
},
},
+ {
+ name: "set custom env",
+ tc: v1alpha1.TidbCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "tc",
+ Namespace: "ns",
+ },
+ Spec: v1alpha1.TidbClusterSpec{
+ PD: v1alpha1.PDSpec{
+ ComponentSpec: v1alpha1.ComponentSpec{
+ Env: []corev1.EnvVar{
+ {
+ Name: "DASHBOARD_SESSION_SECRET",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "dashboard-session-secret",
+ },
+ Key: "encryption_key",
+ },
+ },
+ },
+ {
+ Name: "TZ",
+ Value: "ignored",
+ },
+ },
+ },
+ },
+ },
+ },
+ testSts: testPDContainerEnv(t, []corev1.EnvVar{
+ {
+ Name: "NAMESPACE",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.namespace",
+ },
+ },
+ },
+ {
+ Name: "PEER_SERVICE_NAME",
+ Value: "tc-pd-peer",
+ },
+ {
+ Name: "SERVICE_NAME",
+ Value: "tc-pd",
+ },
+ {
+ Name: "SET_NAME",
+ Value: "tc-pd",
+ },
+ {
+ Name: "TZ",
+ },
+ {
+ Name: "DASHBOARD_SESSION_SECRET",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "dashboard-session-secret",
+ },
+ Key: "encryption_key",
+ },
+ },
+ },
+ }),
+ },
// TODO add more tests
}
@@ -1086,7 +1163,7 @@ func TestGetPDConfigMap(t *testing.T) {
},
Replication: &v1alpha1.PDReplicationConfig{
MaxReplicas: func() *uint64 { i := uint64(5); return &i }(),
- LocationLabels: v1alpha1.StringSlice{"node", "rack"},
+ LocationLabels: []string{"node", "rack"},
},
},
},
@@ -1396,6 +1473,70 @@ func TestGetNewPdServiceForTidbCluster(t *testing.T) {
},
},
},
+ {
+ name: "basic and specify pd service portname",
+ tc: v1alpha1.TidbCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "ns",
+ },
+ Spec: v1alpha1.TidbClusterSpec{
+ Services: []v1alpha1.Service{
+ {Name: "pd", Type: string(corev1.ServiceTypeLoadBalancer)},
+ },
+ PD: v1alpha1.PDSpec{
+ Service: &v1alpha1.ServiceSpec{Type: corev1.ServiceTypeClusterIP,
+ ClusterIP: pointer.StringPtr("172.20.10.1"),
+ PortName: pointer.StringPtr("http-pd"),
+ },
+ },
+ },
+ },
+ expected: corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo-pd",
+ Namespace: "ns",
+ Labels: map[string]string{
+ "app.kubernetes.io/name": "tidb-cluster",
+ "app.kubernetes.io/managed-by": "tidb-operator",
+ "app.kubernetes.io/instance": "foo",
+ "app.kubernetes.io/component": "pd",
+ },
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: "pingcap.com/v1alpha1",
+ Kind: "TidbCluster",
+ Name: "foo",
+ UID: "",
+ Controller: func(b bool) *bool {
+ return &b
+ }(true),
+ BlockOwnerDeletion: func(b bool) *bool {
+ return &b
+ }(true),
+ },
+ },
+ },
+ Spec: corev1.ServiceSpec{
+ ClusterIP: "172.20.10.1",
+ Type: corev1.ServiceTypeClusterIP,
+ Ports: []corev1.ServicePort{
+ {
+ Name: "http-pd",
+ Port: 2379,
+ TargetPort: intstr.FromInt(2379),
+ Protocol: corev1.ProtocolTCP,
+ },
+ },
+ Selector: map[string]string{
+ "app.kubernetes.io/name": "tidb-cluster",
+ "app.kubernetes.io/managed-by": "tidb-operator",
+ "app.kubernetes.io/instance": "foo",
+ "app.kubernetes.io/component": "pd",
+ },
+ },
+ },
+ },
}
for _, tt := range tests {
diff --git a/pkg/manager/member/pd_scaler.go b/pkg/manager/member/pd_scaler.go
index ead05aa166..47c11f4f74 100644
--- a/pkg/manager/member/pd_scaler.go
+++ b/pkg/manager/member/pd_scaler.go
@@ -24,7 +24,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/pdapi"
apps "k8s.io/api/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// TODO add e2e test specs
@@ -47,7 +47,7 @@ func (psd *pdScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, n
} else if scaling < 0 {
return psd.ScaleIn(tc, oldSet, newSet)
}
- return nil
+ return psd.SyncAutoScalerAnn(tc, oldSet)
}
func (psd *pdScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
@@ -59,7 +59,7 @@ func (psd *pdScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet
return nil
}
- glog.Infof("scaling out pd statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
+ klog.Infof("scaling out pd statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
_, err := psd.deleteDeferDeletingPVC(tc, oldSet.GetName(), v1alpha1.PDMemberType, ordinal)
if err != nil {
return err
@@ -110,7 +110,7 @@ func (psd *pdScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet,
return fmt.Errorf("TidbCluster: %s/%s's pd status sync failed,can't scale in now", ns, tcName)
}
- glog.Infof("scaling in pd statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
+ klog.Infof("scaling in pd statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
if controller.PodWebhookEnabled {
setReplicasAndDeleteSlots(newSet, replicas, deleteSlots)
@@ -137,10 +137,10 @@ func (psd *pdScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet,
err := pdClient.DeleteMember(memberName)
if err != nil {
- glog.Errorf("pd scale in: failed to delete member %s, %v", memberName, err)
+ klog.Errorf("pd scale in: failed to delete member %s, %v", memberName, err)
return err
}
- glog.Infof("pd scale in: delete member %s successfully", memberName)
+ klog.Infof("pd scale in: delete member %s successfully", memberName)
pvcName := ordinalPVCName(v1alpha1.PDMemberType, setName, ordinal)
pvc, err := psd.pvcLister.PersistentVolumeClaims(ns).Get(pvcName)
@@ -156,17 +156,21 @@ func (psd *pdScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet,
_, err = psd.pvcControl.UpdatePVC(tc, pvc)
if err != nil {
- glog.Errorf("pd scale in: failed to set pvc %s/%s annotation: %s to %s",
+ klog.Errorf("pd scale in: failed to set pvc %s/%s annotation: %s to %s",
ns, pvcName, label.AnnPVCDeferDeleting, now)
return err
}
- glog.Infof("pd scale in: set pvc %s/%s annotation: %s to %s",
+ klog.Infof("pd scale in: set pvc %s/%s annotation: %s to %s",
ns, pvcName, label.AnnPVCDeferDeleting, now)
setReplicasAndDeleteSlots(newSet, replicas, deleteSlots)
return nil
}
+func (psd *pdScaler) SyncAutoScalerAnn(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet) error {
+ return nil
+}
+
type fakePDScaler struct{}
// NewFakePDScaler returns a fake Scaler
@@ -192,3 +196,7 @@ func (fsd *fakePDScaler) ScaleIn(_ *v1alpha1.TidbCluster, oldSet *apps.StatefulS
setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas-1, nil)
return nil
}
+
+func (fsd *fakePDScaler) SyncAutoScalerAnn(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet) error {
+ return nil
+}
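
A short sketch of the dispatch that pdScaler.Scale now performs after the change above: a positive delta scales out, a negative delta scales in, and an unchanged replica count falls through to SyncAutoScalerAnn instead of returning immediately (the function and return values here are illustrative only).

package example

// scaleAction reports which branch the scaler would take for a replica change.
func scaleAction(oldReplicas, newReplicas int32) string {
	switch delta := newReplicas - oldReplicas; {
	case delta > 0:
		return "scale-out"
	case delta < 0:
		return "scale-in"
	default:
		return "sync-auto-scaler-annotations"
	}
}
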
diff --git a/pkg/manager/member/pd_scaler_test.go b/pkg/manager/member/pd_scaler_test.go
index 39b4ca7f6e..ecb4c90788 100644
--- a/pkg/manager/member/pd_scaler_test.go
+++ b/pkg/manager/member/pd_scaler_test.go
@@ -15,11 +15,11 @@ package member
import (
"fmt"
- "github.com/pingcap/kvproto/pkg/pdpb"
"testing"
"time"
. "github.com/onsi/gomega"
+ "github.com/pingcap/kvproto/pkg/pdpb"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
diff --git a/pkg/manager/member/pd_upgrader.go b/pkg/manager/member/pd_upgrader.go
index ffa1cc58ef..ae9a5b5397 100644
--- a/pkg/manager/member/pd_upgrader.go
+++ b/pkg/manager/member/pd_upgrader.go
@@ -22,7 +22,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/pdapi"
apps "k8s.io/api/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
type pdUpgrader struct {
@@ -68,12 +68,13 @@ func (pu *pdUpgrader) gracefulUpgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Sta
// If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading pd.
// Therefore, in the production environment, we should try to avoid modifying the pd statefulset update strategy directly.
newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy
- glog.Warningf("tidbcluster: [%s/%s] pd statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
+ klog.Warningf("tidbcluster: [%s/%s] pd statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
return nil
}
if controller.PodWebhookEnabled {
setUpgradePartition(newSet, 0)
+ return nil
}
setUpgradePartition(newSet, *oldSet.Spec.UpdateStrategy.RollingUpdate.Partition)
@@ -118,10 +119,10 @@ func (pu *pdUpgrader) upgradePDPod(tc *v1alpha1.TidbCluster, ordinal int32, newS
}
err := pu.transferPDLeaderTo(tc, targetName)
if err != nil {
- glog.Errorf("pd upgrader: failed to transfer pd leader to: %s, %v", targetName, err)
+ klog.Errorf("pd upgrader: failed to transfer pd leader to: %s, %v", targetName, err)
return err
}
- glog.Infof("pd upgrader: transfer pd leader to: %s successfully", targetName)
+ klog.Infof("pd upgrader: transfer pd leader to: %s successfully", targetName)
return controller.RequeueErrorf("tidbcluster: [%s/%s]'s pd member: [%s] is transferring leader to pd member: [%s]", ns, tcName, upgradePodName, targetName)
}
diff --git a/pkg/manager/member/pump_member_manager.go b/pkg/manager/member/pump_member_manager.go
index b03f069753..2afd319947 100644
--- a/pkg/manager/member/pump_member_manager.go
+++ b/pkg/manager/member/pump_member_manager.go
@@ -15,12 +15,14 @@ package member
import (
"fmt"
+ "path"
"strings"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/manager"
+ "github.com/pingcap/tidb-operator/pkg/util"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -29,11 +31,13 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
v1 "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
defaultPumpLogLevel = "info"
+ pumpCertVolumeMount = "pump-tls"
+ pumpCertPath = "/var/lib/pump-tls"
)
type pumpMemberManager struct {
@@ -43,7 +47,6 @@ type pumpMemberManager struct {
cmControl controller.ConfigMapControlInterface
setLister v1.StatefulSetLister
svcLister corelisters.ServiceLister
- cmLister corelisters.ConfigMapLister
podLister corelisters.PodLister
}
@@ -55,7 +58,6 @@ func NewPumpMemberManager(
cmControl controller.ConfigMapControlInterface,
setLister v1.StatefulSetLister,
svcLister corelisters.ServiceLister,
- cmLister corelisters.ConfigMapLister,
podLister corelisters.PodLister) manager.Manager {
return &pumpMemberManager{
setControl,
@@ -64,7 +66,6 @@ func NewPumpMemberManager(
cmControl,
setLister,
svcLister,
- cmLister,
podLister,
}
}
@@ -81,7 +82,6 @@ func (pmm *pumpMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
//syncPumpStatefulSetForTidbCluster sync statefulset status of pump to tidbcluster
func (pmm *pumpMemberManager) syncPumpStatefulSetForTidbCluster(tc *v1alpha1.TidbCluster) error {
-
oldPumpSetTemp, err := pmm.setLister.StatefulSets(tc.Namespace).Get(controller.PumpMemberName(tc.Name))
if err != nil && !errors.IsNotFound(err) {
return err
@@ -89,6 +89,16 @@ func (pmm *pumpMemberManager) syncPumpStatefulSetForTidbCluster(tc *v1alpha1.Tid
notFound := errors.IsNotFound(err)
oldPumpSet := oldPumpSetTemp.DeepCopy()
+ if err := pmm.syncTiDBClusterStatus(tc, oldPumpSet); err != nil {
+ klog.Errorf("failed to sync TidbCluster: [%s/%s]'s status, error: %v", tc.Namespace, tc.Name, err)
+ return err
+ }
+
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tikv cluster %s/%s is paused, skip syncing for pump statefulset", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
cm, err := pmm.syncConfigMap(tc, oldPumpSet)
if err != nil {
return err
@@ -106,15 +116,14 @@ func (pmm *pumpMemberManager) syncPumpStatefulSetForTidbCluster(tc *v1alpha1.Tid
return pmm.setControl.CreateStatefulSet(tc, newPumpSet)
}
- if err := pmm.syncTiDBClusterStatus(tc, oldPumpSet); err != nil {
- glog.Errorf("failed to sync TidbCluster: [%s/%s]'s status, error: %v", tc.Namespace, tc.Name, err)
- return err
- }
-
return updateStatefulSet(pmm.setControl, tc, newPumpSet, oldPumpSet)
}
func (pmm *pumpMemberManager) syncTiDBClusterStatus(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) error {
+ if set == nil {
+ // skip if not created yet
+ return nil
+ }
tc.Status.Pump.StatefulSet = &set.Status
@@ -136,6 +145,10 @@ func (pmm *pumpMemberManager) syncTiDBClusterStatus(tc *v1alpha1.TidbCluster, se
}
func (pmm *pumpMemberManager) syncHeadlessService(tc *v1alpha1.TidbCluster) error {
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tikv cluster %s/%s is paused, skip syncing for pump headless service", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
newSvc := getNewPumpHeadlessService(tc)
oldSvc, err := pmm.svcLister.Services(newSvc.Namespace).Get(newSvc.Name)
@@ -236,15 +249,31 @@ func getNewPumpConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) {
spec := tc.Spec.Pump
objMeta, _ := getPumpMeta(tc, controller.PumpMemberName)
+ if tc.IsTLSClusterEnabled() {
+ securityMap := spec.Config["security"]
+ security := map[string]interface{}{}
+ if securityMap != nil {
+ security = securityMap.(map[string]interface{})
+ }
+
+ security["ssl-ca"] = path.Join(pumpCertPath, corev1.ServiceAccountRootCAKey)
+ security["ssl-cert"] = path.Join(pumpCertPath, corev1.TLSCertKey)
+ security["ssl-key"] = path.Join(pumpCertPath, corev1.TLSPrivateKeyKey)
+ spec.Config["security"] = security
+ }
+
confText, err := MarshalTOML(spec.Config)
if err != nil {
return nil, err
}
name := controller.PumpMemberName(tc.Name)
+ confTextStr := string(confText)
+
data := map[string]string{
- "pump-config": string(confText),
+ "pump-config": confTextStr,
}
+
if basePumpSpec.ConfigUpdateStrategy() == v1alpha1.ConfigUpdateStrategyRollingUpdate {
sum, err := Sha256Sum(data)
if err != nil {
@@ -297,6 +326,21 @@ func getNewPumpStatefulSet(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (*app
},
})
}
+ volumeMounts := []corev1.VolumeMount{
+ {
+ Name: "data",
+ MountPath: "/data",
+ },
+ {
+ Name: "config",
+ MountPath: "/etc/pump",
+ },
+ }
+ if tc.IsTLSClusterEnabled() {
+ volumeMounts = append(volumeMounts, corev1.VolumeMount{
+ Name: pumpCertVolumeMount, ReadOnly: true, MountPath: pumpCertPath,
+ })
+ }
containers := []corev1.Container{
{
Name: "pump",
@@ -311,18 +355,9 @@ func getNewPumpStatefulSet(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (*app
Name: "pump",
ContainerPort: 8250,
}},
- Resources: controller.ContainerResource(tc.Spec.Pump.ResourceRequirements),
- Env: envs,
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "data",
- MountPath: "/data",
- },
- {
- Name: "config",
- MountPath: "/etc/pump",
- },
- },
+ Resources: controller.ContainerResource(tc.Spec.Pump.ResourceRequirements),
+ Env: util.AppendEnv(envs, spec.Env()),
+ VolumeMounts: volumeMounts,
},
}
@@ -346,6 +381,16 @@ func getNewPumpStatefulSet(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (*app
},
}
+ if tc.IsTLSClusterEnabled() {
+ volumes = append(volumes, corev1.Volume{
+ Name: pumpCertVolumeMount, VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: util.ClusterTLSSecretName(tc.Name, label.PumpLabelVal),
+ },
+ },
+ })
+ }
+
volumeClaims := []corev1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/manager/member/pump_member_manager_test.go b/pkg/manager/member/pump_member_manager_test.go
index c92e41b0de..a2401191f2 100644
--- a/pkg/manager/member/pump_member_manager_test.go
+++ b/pkg/manager/member/pump_member_manager_test.go
@@ -455,7 +455,6 @@ func newFakePumpMemberManager() (*pumpMemberManager, *pumpFakeControls, *pumpFak
cmControl,
setInformer.Lister(),
svcInformer.Lister(),
- cmInformer.Lister(),
podInformer.Lister(),
}
controls := &pumpFakeControls{
diff --git a/pkg/manager/member/pvc_cleaner.go b/pkg/manager/member/pvc_cleaner.go
index 1a4b2bb4f1..64097adeef 100644
--- a/pkg/manager/member/pvc_cleaner.go
+++ b/pkg/manager/member/pvc_cleaner.go
@@ -24,7 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -170,7 +170,7 @@ func (rpc *realPVCCleaner) reclaimPV(tc *v1alpha1.TidbCluster) (map[string]strin
if err != nil {
return skipReason, fmt.Errorf("cluster %s/%s patch pv %s to %s failed, err: %v", ns, tcName, pvName, corev1.PersistentVolumeReclaimDelete, err)
}
- glog.Infof("cluster %s/%s patch pv %s to policy %s success", ns, tcName, pvName, corev1.PersistentVolumeReclaimDelete)
+ klog.Infof("cluster %s/%s patch pv %s to policy %s success", ns, tcName, pvName, corev1.PersistentVolumeReclaimDelete)
}
apiPVC, err := rpc.kubeCli.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
@@ -190,7 +190,7 @@ func (rpc *realPVCCleaner) reclaimPV(tc *v1alpha1.TidbCluster) (map[string]strin
if err := rpc.pvcControl.DeletePVC(tc, pvc); err != nil {
return skipReason, fmt.Errorf("cluster %s/%s delete pvc %s failed, err: %v", ns, tcName, pvcName, err)
}
- glog.Infof("cluster %s/%s reclaim pv %s success, pvc %s", ns, tcName, pvName, pvcName)
+ klog.Infof("cluster %s/%s reclaim pv %s success, pvc %s", ns, tcName, pvName, pvcName)
}
return skipReason, nil
}
@@ -217,7 +217,7 @@ func (rpc *realPVCCleaner) cleanScheduleLock(tc *v1alpha1.TidbCluster) (map[stri
if pvc.Annotations[label.AnnPVCDeferDeleting] != "" {
if _, exist := pvc.Annotations[label.AnnPVCPodScheduling]; !exist {
// The defer deleting PVC without pod scheduling annotation, do nothing
- glog.V(4).Infof("cluster %s/%s defer delete pvc %s has not pod scheduling annotation, skip clean", ns, tcName, pvcName)
+ klog.V(4).Infof("cluster %s/%s defer delete pvc %s has not pod scheduling annotation, skip clean", ns, tcName, pvcName)
skipReason[pvcName] = skipReasonPVCCleanerDeferDeletePVCNotHasLock
continue
}
@@ -247,14 +247,14 @@ func (rpc *realPVCCleaner) cleanScheduleLock(tc *v1alpha1.TidbCluster) (map[stri
if _, exist := pvc.Annotations[label.AnnPVCPodScheduling]; !exist {
// The PVC without pod scheduling annotation, do nothing
- glog.V(4).Infof("cluster %s/%s pvc %s has not pod scheduling annotation, skip clean", ns, tcName, pvcName)
+ klog.V(4).Infof("cluster %s/%s pvc %s has not pod scheduling annotation, skip clean", ns, tcName, pvcName)
skipReason[pvcName] = skipReasonPVCCleanerPVCNotHasLock
continue
}
if pvc.Status.Phase != corev1.ClaimBound || pod.Spec.NodeName == "" {
// This pod has not been scheduled yet, no need to clean up the pvc pod schedule annotation
- glog.V(4).Infof("cluster %s/%s pod %s has not been scheduled yet, skip clean pvc %s pod schedule annotation", ns, tcName, podName, pvcName)
+ klog.V(4).Infof("cluster %s/%s pod %s has not been scheduled yet, skip clean pvc %s pod schedule annotation", ns, tcName, podName, pvcName)
skipReason[pvcName] = skipReasonPVCCleanerPodWaitingForScheduling
continue
}
@@ -263,7 +263,7 @@ func (rpc *realPVCCleaner) cleanScheduleLock(tc *v1alpha1.TidbCluster) (map[stri
if _, err := rpc.pvcControl.UpdatePVC(tc, pvc); err != nil {
return skipReason, fmt.Errorf("cluster %s/%s remove pvc %s pod scheduling annotation faild, err: %v", ns, tcName, pvcName, err)
}
- glog.Infof("cluster %s/%s, clean pvc %s pod scheduling annotation successfully", ns, tcName, pvcName)
+ klog.Infof("cluster %s/%s, clean pvc %s pod scheduling annotation successfully", ns, tcName, pvcName)
}
return skipReason, nil
diff --git a/pkg/manager/member/scaler.go b/pkg/manager/member/scaler.go
index 017bc6c0db..14b9bcfe71 100644
--- a/pkg/manager/member/scaler.go
+++ b/pkg/manager/member/scaler.go
@@ -26,7 +26,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -43,6 +43,8 @@ type Scaler interface {
ScaleOut(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet, desired *apps.StatefulSet) error
// ScaleIn scales in the cluster
ScaleIn(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet, desired *apps.StatefulSet) error
+ // SyncAutoScalerAnn syncs the annotations created by the auto-scaler
+ SyncAutoScalerAnn(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet) error
}
type generalScaler struct {
@@ -78,10 +80,10 @@ func (gs *generalScaler) deleteDeferDeletingPVC(tc *v1alpha1.TidbCluster,
err = gs.pvcControl.DeletePVC(tc, pvc)
if err != nil {
- glog.Errorf("scale out: failed to delete pvc %s/%s, %v", ns, pvcName, err)
+ klog.Errorf("scale out: failed to delete pvc %s/%s, %v", ns, pvcName, err)
return skipReason, err
}
- glog.Infof("scale out: delete pvc %s/%s successfully", ns, pvcName)
+ klog.Infof("scale out: delete pvc %s/%s successfully", ns, pvcName)
return skipReason, nil
}
@@ -98,11 +100,11 @@ func setReplicasAndDeleteSlots(newSet *apps.StatefulSet, replicas int32, deleteS
*newSet.Spec.Replicas = replicas
if features.DefaultFeatureGate.Enabled(features.AdvancedStatefulSet) {
helper.SetDeleteSlots(newSet, deleteSlots)
- glog.Infof("scale statefulset: %s/%s replicas from %d to %d (delete slots: %v)",
+ klog.Infof("scale statefulset: %s/%s replicas from %d to %d (delete slots: %v)",
newSet.GetNamespace(), newSet.GetName(), oldReplicas, replicas, deleteSlots.List())
return
}
- glog.Infof("scale statefulset: %s/%s replicas from %d to %d",
+ klog.Infof("scale statefulset: %s/%s replicas from %d to %d",
newSet.GetNamespace(), newSet.GetName(), oldReplicas, replicas)
}
diff --git a/pkg/manager/member/template.go b/pkg/manager/member/template.go
index 9c33dd5d67..44712f13a1 100644
--- a/pkg/manager/member/template.go
+++ b/pkg/manager/member/template.go
@@ -47,7 +47,10 @@ then
tail -f /dev/null
fi
+# Use HOSTNAME if POD_NAME is unset for backward compatibility.
+POD_NAME=${POD_NAME:-$HOSTNAME}
ARGS="--store=tikv \
+--advertise-address=${POD_NAME}.${HEADLESS_SERVICE_NAME}.${NAMESPACE}.svc \
--host=0.0.0.0 \
--path=${CLUSTER_NAME}-pd:2379 \
--config=/etc/tidb/tidb.toml
@@ -216,7 +219,7 @@ fi
# Use HOSTNAME if POD_NAME is unset for backward compatibility.
POD_NAME=${POD_NAME:-$HOSTNAME}
-ARGS="--pd=http://${CLUSTER_NAME}-pd:2379 \
+ARGS="--pd={{ .Scheme }}://${CLUSTER_NAME}-pd:2379 \
--advertise-addr=${POD_NAME}.${HEADLESS_SERVICE_NAME}.${NAMESPACE}.svc:20160 \
--addr=0.0.0.0:20160 \
--status-addr=0.0.0.0:20180 \
@@ -225,6 +228,11 @@ ARGS="--pd=http://${CLUSTER_NAME}-pd:2379 \
--config=/etc/tikv/tikv.toml
"
+if [ ! -z "${STORE_LABELS:-}" ]; then
+ LABELS=" --labels ${STORE_LABELS} "
+ ARGS="${ARGS}${LABELS}"
+fi
+
echo "starting tikv-server ..."
echo "/tikv-server ${ARGS}"
exec /tikv-server ${ARGS}
@@ -270,7 +278,11 @@ var tidbInitStartScriptTpl = template.Must(template.New("tidb-init-start-script"
host = '{{ .ClusterName }}-tidb'
permit_host = '{{ .PermitHost }}'
port = 4000
+{{- if .TLS }}
+conn = MySQLdb.connect(host=host, port=port, user='root', connect_timeout=5, ssl={'ca': '{{ .CAPath }}', 'cert': '{{ .CertPath }}', 'key': '{{ .KeyPath }}'})
+{{- else }}
conn = MySQLdb.connect(host=host, port=port, user='root', connect_timeout=5)
+{{- end }}
{{- if .PasswordSet }}
password_dir = '/etc/tidb/password'
for file in os.listdir(password_dir):
@@ -294,6 +306,7 @@ if permit_host != '%%':
conn.cursor().execute("update mysql.user set Host=%s where User='root';", (permit_host,))
conn.cursor().execute("flush privileges;")
conn.commit()
+conn.close()
`))
type TiDBInitStartScriptModel struct {
@@ -301,6 +314,10 @@ type TiDBInitStartScriptModel struct {
PermitHost string
PasswordSet bool
InitSQL bool
+ TLS bool
+ CAPath string
+ CertPath string
+ KeyPath string
}
func RenderTiDBInitStartScript(model *TiDBInitStartScriptModel) (string, error) {
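
A hedged usage sketch of the extended model above: a caller might fill the new TLS fields so the generated init script connects to TiDB over TLS. The field set follows the struct and template shown in this diff; the file paths and cluster name are illustrative values, not ones mandated by the change.

package example

import "github.com/pingcap/tidb-operator/pkg/manager/member"

// renderTLSInitScript renders the init start script with TLS enabled.
func renderTLSInitScript() (string, error) {
	return member.RenderTiDBInitStartScript(&member.TiDBInitStartScriptModel{
		ClusterName: "basic",
		PermitHost:  "%",
		TLS:         true,
		CAPath:      "/var/lib/tidb-client-tls/ca.crt",
		CertPath:    "/var/lib/tidb-client-tls/tls.crt",
		KeyPath:     "/var/lib/tidb-client-tls/tls.key",
	})
}
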
diff --git a/pkg/manager/member/tidb_discovery_manager.go b/pkg/manager/member/tidb_discovery_manager.go
index 1d6f056e3c..f362724093 100644
--- a/pkg/manager/member/tidb_discovery_manager.go
+++ b/pkg/manager/member/tidb_discovery_manager.go
@@ -14,6 +14,8 @@
package member
import (
+ "encoding/json"
+
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
@@ -81,8 +83,11 @@ func (m *realTidbDiscoveryManager) Reconcile(tc *v1alpha1.TidbCluster) error {
if err != nil {
return controller.RequeueErrorf("error creating or updating discovery rolebinding: %v", err)
}
-
- deploy, err := m.ctrl.CreateOrUpdateDeployment(tc, getTidbDiscoveryDeployment(tc))
+ d, err := getTidbDiscoveryDeployment(tc)
+ if err != nil {
+ return controller.RequeueErrorf("error generating discovery deployment: %v", err)
+ }
+ deploy, err := m.ctrl.CreateOrUpdateDeployment(tc, d)
if err != nil {
return controller.RequeueErrorf("error creating or updating discovery service: %v", err)
}
@@ -111,9 +116,9 @@ func getTidbDiscoveryService(tc *v1alpha1.TidbCluster, deploy *appsv1.Deployment
}
}
-func getTidbDiscoveryDeployment(tc *v1alpha1.TidbCluster) *appsv1.Deployment {
+func getTidbDiscoveryDeployment(tc *v1alpha1.TidbCluster) (*appsv1.Deployment, error) {
meta, l := getDiscoveryMeta(tc, controller.DiscoveryMemberName)
- return &appsv1.Deployment{
+ d := &appsv1.Deployment{
ObjectMeta: meta,
Spec: appsv1.DeploymentSpec{
Replicas: controller.Int32Ptr(1),
@@ -150,6 +155,15 @@ func getTidbDiscoveryDeployment(tc *v1alpha1.TidbCluster) *appsv1.Deployment {
},
},
}
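+	// Record the serialized pod template in an annotation so later reconciles can compare it against the last applied spec.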
+ b, err := json.Marshal(d.Spec.Template.Spec)
+ if err != nil {
+ return nil, err
+ }
+ if d.Annotations == nil {
+ d.Annotations = map[string]string{}
+ }
+ d.Annotations[controller.LastAppliedPodTemplate] = string(b)
+ return d, nil
}
func getDiscoveryMeta(tc *v1alpha1.TidbCluster, nameFunc func(string) string) (metav1.ObjectMeta, label.Label) {
diff --git a/pkg/manager/member/tidb_failover.go b/pkg/manager/member/tidb_failover.go
index e0102c371f..17df4353ee 100644
--- a/pkg/manager/member/tidb_failover.go
+++ b/pkg/manager/member/tidb_failover.go
@@ -14,21 +14,26 @@
package member
import (
+ "fmt"
"time"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- glog "k8s.io/klog"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog"
)
type tidbFailover struct {
tidbFailoverPeriod time.Duration
+ recorder record.EventRecorder
}
// NewTiDBFailover returns a tidbFailover instance
-func NewTiDBFailover(failoverPeriod time.Duration) Failover {
+func NewTiDBFailover(failoverPeriod time.Duration, recorder record.EventRecorder) Failover {
return &tidbFailover{
tidbFailoverPeriod: failoverPeriod,
+ recorder: recorder,
}
}
@@ -41,26 +46,28 @@ func (tf *tidbFailover) Failover(tc *v1alpha1.TidbCluster) error {
_, exist := tc.Status.TiDB.FailureMembers[tidbMember.Name]
if exist && tidbMember.Health {
delete(tc.Status.TiDB.FailureMembers, tidbMember.Name)
- glog.Infof("tidb failover: delete %s from tidb failoverMembers", tidbMember.Name)
+ klog.Infof("tidb failover: delete %s from tidb failoverMembers", tidbMember.Name)
}
}
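+	// Failover only proceeds when maxFailoverCount is set and positive, and stops once the failure members reach that limit.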
- if tc.Spec.TiDB.MaxFailoverCount != nil {
+ if tc.Spec.TiDB.MaxFailoverCount != nil && *tc.Spec.TiDB.MaxFailoverCount > 0 {
maxFailoverCount := *tc.Spec.TiDB.MaxFailoverCount
- if maxFailoverCount > 0 && len(tc.Status.TiDB.FailureMembers) >= int(maxFailoverCount) {
- glog.Warningf("the failure members count reached the limit:%d", tc.Spec.TiDB.MaxFailoverCount)
+ if len(tc.Status.TiDB.FailureMembers) >= int(maxFailoverCount) {
+ klog.Warningf("the failure members count reached the limit:%d", tc.Spec.TiDB.MaxFailoverCount)
return nil
}
- }
- for _, tidbMember := range tc.Status.TiDB.Members {
- _, exist := tc.Status.TiDB.FailureMembers[tidbMember.Name]
- deadline := tidbMember.LastTransitionTime.Add(tf.tidbFailoverPeriod)
- if !tidbMember.Health && time.Now().After(deadline) && !exist {
- tc.Status.TiDB.FailureMembers[tidbMember.Name] = v1alpha1.TiDBFailureMember{
- PodName: tidbMember.Name,
- CreatedAt: metav1.Now(),
+ for _, tidbMember := range tc.Status.TiDB.Members {
+ _, exist := tc.Status.TiDB.FailureMembers[tidbMember.Name]
+ deadline := tidbMember.LastTransitionTime.Add(tf.tidbFailoverPeriod)
+ if !tidbMember.Health && time.Now().After(deadline) && !exist {
+ tc.Status.TiDB.FailureMembers[tidbMember.Name] = v1alpha1.TiDBFailureMember{
+ PodName: tidbMember.Name,
+ CreatedAt: metav1.Now(),
+ }
+ msg := fmt.Sprintf("tidb[%s] is unhealthy", tidbMember.Name)
+ tf.recorder.Event(tc, corev1.EventTypeWarning, unHealthEventReason, fmt.Sprintf(unHealthEventMsgPattern, "tidb", tidbMember.Name, msg))
+ break
}
- break
}
}
@@ -71,7 +78,8 @@ func (tf *tidbFailover) Recover(tc *v1alpha1.TidbCluster) {
tc.Status.TiDB.FailureMembers = nil
}
-type fakeTiDBFailover struct{}
+type fakeTiDBFailover struct {
+}
// NewFakeTiDBFailover returns a fake Failover
func NewFakeTiDBFailover() Failover {
diff --git a/pkg/manager/member/tidb_failover_test.go b/pkg/manager/member/tidb_failover_test.go
index 26a48bb164..1b15420fa6 100644
--- a/pkg/manager/member/tidb_failover_test.go
+++ b/pkg/manager/member/tidb_failover_test.go
@@ -19,6 +19,7 @@ import (
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
@@ -200,7 +201,7 @@ func TestFakeTiDBFailoverFailover(t *testing.T) {
t.Expect(err).NotTo(HaveOccurred())
},
expectFn: func(t *GomegaWithT, tc *v1alpha1.TidbCluster) {
- t.Expect(len(tc.Status.TiDB.FailureMembers)).To(Equal(4))
+ t.Expect(len(tc.Status.TiDB.FailureMembers)).To(Equal(3))
t.Expect(int(tc.Spec.TiDB.Replicas)).To(Equal(2))
},
},
@@ -382,7 +383,8 @@ func TestFakeTiDBFailoverRecover(t *testing.T) {
}
func newTiDBFailover() Failover {
- return &tidbFailover{tidbFailoverPeriod: time.Duration(5 * time.Minute)}
+ recorder := record.NewFakeRecorder(100)
+ return &tidbFailover{tidbFailoverPeriod: time.Duration(5 * time.Minute), recorder: recorder}
}
func newTidbClusterForTiDBFailover() *v1alpha1.TidbCluster {
diff --git a/pkg/manager/member/tidb_init_manager.go b/pkg/manager/member/tidb_init_manager.go
index b638782578..065995caa9 100644
--- a/pkg/manager/member/tidb_init_manager.go
+++ b/pkg/manager/member/tidb_init_manager.go
@@ -30,6 +30,7 @@ import (
listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
)
const (
@@ -59,6 +60,7 @@ type tidbInitManager struct {
jobLister batchlisters.JobLister
genericCli client.Client
tiLister listers.TidbInitializerLister
+ tcLister listers.TidbClusterLister
typedControl controller.TypedControlInterface
}
@@ -67,12 +69,14 @@ func NewTiDBInitManager(
jobLister batchlisters.JobLister,
genericCli client.Client,
tiLister listers.TidbInitializerLister,
+ tcLister listers.TidbClusterLister,
typedControl controller.TypedControlInterface,
) InitManager {
return &tidbInitManager{
jobLister,
genericCli,
tiLister,
+ tcLister,
typedControl,
}
}
@@ -134,6 +138,7 @@ func (tm *tidbInitManager) syncTiDBInitConfigMap(ti *v1alpha1.TidbInitializer) e
name := controller.TiDBInitializerMemberName(ti.Spec.Clusters.Name)
ns := ti.Namespace
cm := &corev1.ConfigMap{}
+ tcName := ti.Spec.Clusters.Name
exist, err := tm.typedControl.Exist(client.ObjectKey{
Namespace: ns,
@@ -146,7 +151,12 @@ func (tm *tidbInitManager) syncTiDBInitConfigMap(ti *v1alpha1.TidbInitializer) e
return nil
}
- newCm, err := getTiDBInitConfigMap(ti)
+ tc, err := tm.tcLister.TidbClusters(ns).Get(tcName)
+ if err != nil {
+ return err
+ }
+
+ newCm, err := getTiDBInitConfigMap(ti, tc.Spec.TiDB.IsTLSClientEnabled())
if err != nil {
return err
}
@@ -187,6 +197,13 @@ func (tm *tidbInitManager) syncTiDBInitJob(ti *v1alpha1.TidbInitializer) error {
func (tm *tidbInitManager) makeTiDBInitJob(ti *v1alpha1.TidbInitializer) (*batchv1.Job, error) {
jobName := controller.TiDBInitializerMemberName(ti.Spec.Clusters.Name)
+ ns := ti.Namespace
+ tcName := ti.Spec.Clusters.Name
+
+ tc, err := tm.tcLister.TidbClusters(ns).Get(tcName)
+ if err != nil {
+ return nil, err
+ }
var envs []corev1.EnvVar
if ti.Spec.Timezone != "" {
@@ -207,6 +224,21 @@ func (tm *tidbInitManager) makeTiDBInitJob(ti *v1alpha1.TidbInitializer) (*batch
var vms []corev1.VolumeMount
var vs []corev1.Volume
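+	// Mount the TiDB client TLS secret into the initializer Job so the init script can connect to TiDB over TLS.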
+ if tc.Spec.TiDB.IsTLSClientEnabled() {
+ vms = append(vms, corev1.VolumeMount{
+ Name: "tidb-client-tls",
+ ReadOnly: true,
+ MountPath: util.TiDBClientTLSPath,
+ })
+ vs = append(vs, corev1.Volume{
+ Name: "tidb-client-tls",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: util.TiDBClientTLSSecretName(tcName),
+ },
+ },
+ })
+ }
vms = append(vms, corev1.VolumeMount{
Name: startKey,
ReadOnly: true,
@@ -335,7 +367,7 @@ func (tm *tidbInitManager) makeTiDBInitJob(ti *v1alpha1.TidbInitializer) (*batch
return job, nil
}
-func getTiDBInitConfigMap(ti *v1alpha1.TidbInitializer) (*corev1.ConfigMap, error) {
+func getTiDBInitConfigMap(ti *v1alpha1.TidbInitializer, tlsClientEnabled bool) (*corev1.ConfigMap, error) {
var initSQL, passwdSet bool
permitHost := ti.GetPermitHost()
@@ -354,12 +386,19 @@ func getTiDBInitConfigMap(ti *v1alpha1.TidbInitializer) (*corev1.ConfigMap, erro
return nil, err
}
- startScript, err := RenderTiDBInitStartScript(&TiDBInitStartScriptModel{
+ initModel := &TiDBInitStartScriptModel{
ClusterName: ti.Spec.Clusters.Name,
PermitHost: permitHost,
InitSQL: initSQL,
PasswordSet: passwdSet,
- })
+ }
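+	// When TLS for MySQL clients is enabled, point the init script at the CA, cert and key mounted from the client TLS secret.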
+ if tlsClientEnabled {
+ initModel.TLS = true
+ initModel.CAPath = path.Join(util.TiDBClientTLSPath, corev1.ServiceAccountRootCAKey)
+ initModel.CertPath = path.Join(util.TiDBClientTLSPath, corev1.TLSCertKey)
+ initModel.KeyPath = path.Join(util.TiDBClientTLSPath, corev1.TLSPrivateKeyKey)
+ }
+ startScript, err := RenderTiDBInitStartScript(initModel)
if err != nil {
return nil, err
}
diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go
index d99995b25e..97b8647a06 100644
--- a/pkg/manager/member/tidb_member_manager.go
+++ b/pkg/manager/member/tidb_member_manager.go
@@ -34,6 +34,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
v1 "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
+ "k8s.io/klog"
"k8s.io/utils/pointer"
)
@@ -47,6 +48,10 @@ const (
serverCertPath = "/var/lib/tidb-server-tls"
// serviceAccountCAPath is where the CABundle of the service account is located
serviceAccountCAPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ // tlsSecretRootCAKey is the key used in the TLS secret for the root CA.
+ // When users use self-signed certificates, the root CA must be provided. We
+ // follow the same convention as the Kubernetes service account token.
+ tlsSecretRootCAKey = corev1.ServiceAccountRootCAKey
)
type tidbMemberManager struct {
@@ -54,7 +59,6 @@ type tidbMemberManager struct {
svcControl controller.ServiceControlInterface
tidbControl controller.TiDBControlInterface
typedControl controller.TypedControlInterface
- certControl controller.CertControlInterface
setLister v1.StatefulSetLister
svcLister corelisters.ServiceLister
podLister corelisters.PodLister
@@ -69,12 +73,10 @@ type tidbMemberManager struct {
func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface,
svcControl controller.ServiceControlInterface,
tidbControl controller.TiDBControlInterface,
- certControl controller.CertControlInterface,
typedControl controller.TypedControlInterface,
setLister v1.StatefulSetLister,
svcLister corelisters.ServiceLister,
podLister corelisters.PodLister,
- cmLister corelisters.ConfigMapLister,
tidbUpgrader Upgrader,
autoFailover bool,
tidbFailover Failover) manager.Manager {
@@ -83,11 +85,9 @@ func NewTiDBMemberManager(setControl controller.StatefulSetControlInterface,
svcControl: svcControl,
tidbControl: tidbControl,
typedControl: typedControl,
- certControl: certControl,
setLister: setLister,
svcLister: svcLister,
podLister: podLister,
- cmLister: cmLister,
tidbUpgrader: tidbUpgrader,
autoFailover: autoFailover,
tidbFailover: tidbFailover,
@@ -108,15 +108,21 @@ func (tmm *tidbMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
return err
}
- // Sync Tidb StatefulSet
- if err := tmm.syncTiDBStatefulSetForTidbCluster(tc); err != nil {
+ // Sync TiDB Service before syncing TiDB StatefulSet
+ if err := tmm.syncTiDBService(tc); err != nil {
return err
}
- return tmm.syncTiDBService(tc)
+ // Sync TiDB StatefulSet
+ return tmm.syncTiDBStatefulSetForTidbCluster(tc)
}
func (tmm *tidbMemberManager) syncTiDBHeadlessServiceForTidbCluster(tc *v1alpha1.TidbCluster) error {
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tidb cluster %s/%s is paused, skip syncing for tidb headless service", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
ns := tc.GetNamespace()
tcName := tc.GetName()
@@ -164,6 +170,15 @@ func (tmm *tidbMemberManager) syncTiDBStatefulSetForTidbCluster(tc *v1alpha1.Tid
setNotExist := errors.IsNotFound(err)
oldTiDBSet := oldTiDBSetTemp.DeepCopy()
+ if err = tmm.syncTidbClusterStatus(tc, oldTiDBSet); err != nil {
+ return err
+ }
+
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tidb cluster %s/%s is paused, skip syncing for tidb statefulset", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
cm, err := tmm.syncTiDBConfigMap(tc, oldTiDBSet)
if err != nil {
return err
@@ -175,22 +190,6 @@ func (tmm *tidbMemberManager) syncTiDBStatefulSetForTidbCluster(tc *v1alpha1.Tid
if err != nil {
return err
}
- if tc.IsTLSClusterEnabled() {
- err := tmm.syncTiDBClusterCerts(tc)
- if err != nil {
- return err
- }
- }
- if tc.Spec.TiDB.IsTLSClientEnabled() {
- err := tmm.syncTiDBServerCerts(tc)
- if err != nil {
- return err
- }
- err = tmm.syncTiDBClientCerts(tc)
- if err != nil {
- return err
- }
- }
err = tmm.setControl.CreateStatefulSet(tc, newTiDBSet)
if err != nil {
return err
@@ -199,17 +198,13 @@ func (tmm *tidbMemberManager) syncTiDBStatefulSetForTidbCluster(tc *v1alpha1.Tid
return nil
}
- if err = tmm.syncTidbClusterStatus(tc, oldTiDBSet); err != nil {
- return err
- }
-
if !templateEqual(newTiDBSet, oldTiDBSet) || tc.Status.TiDB.Phase == v1alpha1.UpgradePhase {
if err := tmm.tidbUpgrader.Upgrade(tc, oldTiDBSet, newTiDBSet); err != nil {
return err
}
}
- if tmm.autoFailover {
+ if tmm.autoFailover && tc.Spec.TiDB.MaxFailoverCount != nil {
if tc.Spec.TiDB.Replicas == int32(0) && tc.Status.TiDB.FailureMembers != nil {
tmm.tidbFailover.Recover(tc)
}
@@ -225,102 +220,12 @@ func (tmm *tidbMemberManager) syncTiDBStatefulSetForTidbCluster(tc *v1alpha1.Tid
return updateStatefulSet(tmm.setControl, tc, newTiDBSet, oldTiDBSet)
}
-// syncTiDBClusterCerts creates the cert pair for TiDB if not exist, the cert
-// pair is used to communicate with other TiDB components, like TiKVs and PDs
-func (tmm *tidbMemberManager) syncTiDBClusterCerts(tc *v1alpha1.TidbCluster) error {
- ns := tc.GetNamespace()
- tcName := tc.GetName()
- svcName := controller.TiDBMemberName(tcName)
- peerName := controller.TiDBPeerMemberName(tcName)
-
- if tmm.certControl.CheckSecret(ns, svcName) {
- return nil
- }
-
- hostList := []string{
- svcName,
- peerName,
- fmt.Sprintf("%s.%s", svcName, ns),
- fmt.Sprintf("%s.%s", peerName, ns),
- fmt.Sprintf("*.%s.%s", peerName, ns),
- }
-
- ipList := []string{
- "127.0.0.1", "::1", // able to access https endpoint via loopback network
- }
-
- certOpts := &controller.TiDBClusterCertOptions{
- Namespace: ns,
- Instance: tcName,
- CommonName: svcName,
- HostList: hostList,
- IPList: ipList,
- Component: "tidb",
- Suffix: "tidb",
- }
-
- return tmm.certControl.Create(controller.GetOwnerRef(tc), certOpts)
-}
-
-// syncTiDBServerCerts creates the cert pair for TiDB if not exist, the cert
-// pair is used to communicate with DB clients with encrypted connections
-func (tmm *tidbMemberManager) syncTiDBServerCerts(tc *v1alpha1.TidbCluster) error {
- suffix := "tidb-server"
- ns := tc.GetNamespace()
- tcName := tc.GetName()
- svcName := fmt.Sprintf("%s-%s", tcName, suffix)
-
- if tmm.certControl.CheckSecret(ns, svcName) {
- return nil
- }
-
- hostList := []string{
- svcName,
- fmt.Sprintf("%s.%s", svcName, ns),
- }
-
- certOpts := &controller.TiDBClusterCertOptions{
- Namespace: ns,
- Instance: tcName,
- CommonName: svcName,
- HostList: hostList,
- Component: "tidb",
- Suffix: suffix,
- }
-
- return tmm.certControl.Create(controller.GetOwnerRef(tc), certOpts)
-}
-
-// syncTiDBClientCerts creates the cert pair for TiDB if not exist, the cert
-// pair is used for DB clients to connect to TiDB server with encrypted connections
-func (tmm *tidbMemberManager) syncTiDBClientCerts(tc *v1alpha1.TidbCluster) error {
- suffix := "tidb-client"
- ns := tc.GetNamespace()
- tcName := tc.GetName()
- commonName := fmt.Sprintf("%s-%s", tcName, suffix)
-
- if tmm.certControl.CheckSecret(ns, commonName) {
+func (tmm *tidbMemberManager) syncTiDBService(tc *v1alpha1.TidbCluster) error {
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tidb cluster %s/%s is paused, skip syncing for tidb service", tc.GetNamespace(), tc.GetName())
return nil
}
- hostList := []string{
- commonName,
- }
-
- certOpts := &controller.TiDBClusterCertOptions{
- Namespace: ns,
- Instance: tcName,
- CommonName: commonName,
- HostList: hostList,
- Component: "tidb",
- Suffix: suffix,
- }
-
- return tmm.certControl.Create(controller.GetOwnerRef(tc), certOpts)
-}
-
-func (tmm *tidbMemberManager) syncTiDBService(tc *v1alpha1.TidbCluster) error {
-
newSvc := getNewTiDBServiceOrNil(tc)
// TODO: delete tidb service if user remove the service spec deliberately
if newSvc == nil {
@@ -408,17 +313,17 @@ func getTiDBConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) {
if config.Security == nil {
config.Security = &v1alpha1.Security{}
}
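+		// Use the standard TLS secret keys (ca.crt, tls.crt, tls.key) under the mounted cluster cert path.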
- config.Security.ClusterSSLCA = pointer.StringPtr(serviceAccountCAPath)
- config.Security.ClusterSSLCert = pointer.StringPtr(path.Join(clusterCertPath, "cert"))
- config.Security.ClusterSSLKey = pointer.StringPtr(path.Join(clusterCertPath, "key"))
+ config.Security.ClusterSSLCA = pointer.StringPtr(path.Join(clusterCertPath, tlsSecretRootCAKey))
+ config.Security.ClusterSSLCert = pointer.StringPtr(path.Join(clusterCertPath, corev1.TLSCertKey))
+ config.Security.ClusterSSLKey = pointer.StringPtr(path.Join(clusterCertPath, corev1.TLSPrivateKeyKey))
}
if tc.Spec.TiDB.IsTLSClientEnabled() {
if config.Security == nil {
config.Security = &v1alpha1.Security{}
}
- config.Security.SSLCA = pointer.StringPtr(serviceAccountCAPath)
- config.Security.SSLCert = pointer.StringPtr(path.Join(serverCertPath, "cert"))
- config.Security.SSLKey = pointer.StringPtr(path.Join(serverCertPath, "key"))
+ config.Security.SSLCA = pointer.StringPtr(path.Join(serverCertPath, tlsSecretRootCAKey))
+ config.Security.SSLCert = pointer.StringPtr(path.Join(serverCertPath, corev1.TLSCertKey))
+ config.Security.SSLKey = pointer.StringPtr(path.Join(serverCertPath, corev1.TLSPrivateKeyKey))
}
confText, err := MarshalTOML(config)
if err != nil {
@@ -474,10 +379,13 @@ func getNewTiDBServiceOrNil(tc *v1alpha1.TidbCluster) *corev1.Service {
instanceName := tc.GetInstanceName()
tidbLabels := label.New().Instance(instanceName).TiDB().Labels()
svcName := controller.TiDBMemberName(tcName)
-
+ portName := "mysql-client"
+ if svcSpec.PortName != nil {
+ portName = *svcSpec.PortName
+ }
ports := []corev1.ServicePort{
{
- Name: "mysql-client",
+ Name: portName,
Port: 4000,
TargetPort: intstr.FromInt(4000),
Protocol: corev1.ProtocolTCP,
@@ -551,6 +459,7 @@ func getNewTiDBHeadlessServiceForTidbCluster(tc *v1alpha1.TidbCluster) *corev1.S
func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) *apps.StatefulSet {
ns := tc.GetNamespace()
tcName := tc.GetName()
+ headlessSvcName := controller.TiDBPeerMemberName(tcName)
baseTiDBSpec := tc.BaseTiDBSpec()
instanceName := tc.GetInstanceName()
tidbConfigMap := controller.MemberConfigMapName(tc, v1alpha1.TiDBMemberType)
@@ -598,16 +507,17 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
vols = append(vols, corev1.Volume{
Name: "tidb-tls", VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
- SecretName: controller.TiDBMemberName(tcName),
+ SecretName: util.ClusterTLSSecretName(tcName, label.TiDBLabelVal),
},
},
})
}
if tc.Spec.TiDB.IsTLSClientEnabled() {
+ secretName := tlsClientSecretName(tc)
vols = append(vols, corev1.Volume{
Name: "tidb-server-tls", VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
- SecretName: fmt.Sprintf("%s-%s", controller.TiDBMemberName(tcName), "server"),
+ SecretName: secretName,
},
},
})
@@ -693,12 +603,28 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
Name: "SLOW_LOG_FILE",
Value: slowLogFileEnvVal,
},
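+		// Expose the pod name, namespace and headless service name to the startup script so it can compute the advertise address.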
+ {
+ Name: "POD_NAME",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.name",
+ },
+ },
+ },
+ {
+ Name: "NAMESPACE",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.namespace",
+ },
+ },
+ },
+ {
+ Name: "HEADLESS_SERVICE_NAME",
+ Value: headlessSvcName,
+ },
}
- scheme := corev1.URISchemeHTTP
- if tc.IsTLSClusterEnabled() {
- scheme = corev1.URISchemeHTTPS
- }
containers = append(containers, corev1.Container{
Name: v1alpha1.TiDBMemberType.String(),
Image: tc.TiDBImage(),
@@ -718,13 +644,11 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
},
VolumeMounts: volMounts,
Resources: controller.ContainerResource(tc.Spec.TiDB.ResourceRequirements),
- Env: envs,
+ Env: util.AppendEnv(envs, baseTiDBSpec.Env()),
ReadinessProbe: &corev1.Probe{
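+				// Use a plain TCP check on the MySQL port (4000) instead of an HTTP GET on the status port.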
Handler: corev1.Handler{
- HTTPGet: &corev1.HTTPGetAction{
- Path: "/status",
- Port: intstr.FromInt(10080),
- Scheme: scheme,
+ TCPSocket: &corev1.TCPSocketAction{
+ Port: intstr.FromInt(4000),
},
},
InitialDelaySeconds: int32(10),
@@ -773,6 +697,11 @@ func getNewTiDBSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
}
func (tmm *tidbMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) error {
+ if set == nil {
+ // skip if not created yet
+ return nil
+ }
+
tc.Status.TiDB.StatefulSet = &set.Status
upgrading, err := tmm.tidbStatefulSetIsUpgradingFn(tmm.podLister, set, tc)
@@ -816,7 +745,11 @@ func (tmm *tidbMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, se
tidbStatus[name] = newTidbMember
}
tc.Status.TiDB.Members = tidbStatus
-
+ tc.Status.TiDB.Image = ""
+ c := filterContainer(set, "tidb")
+ if c != nil {
+ tc.Status.TiDB.Image = c.Image
+ }
return nil
}
@@ -847,6 +780,10 @@ func tidbStatefulSetIsUpgrading(podLister corelisters.PodLister, set *apps.State
return false, nil
}
+func tlsClientSecretName(tc *v1alpha1.TidbCluster) string {
+ return fmt.Sprintf("%s-server-secret", controller.TiDBMemberName(tc.Name))
+}
+
type FakeTiDBMemberManager struct {
err error
}
diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go
index a6b973d102..5f62c8d4cc 100644
--- a/pkg/manager/member/tidb_member_manager_test.go
+++ b/pkg/manager/member/tidb_member_manager_test.go
@@ -725,6 +725,22 @@ func TestTiDBMemberManagerSyncTidbService(t *testing.T) {
g.Expect(svc.Spec.ClusterIP).To(Equal("8.8.8.8"))
},
},
+ {
+ name: "Create service with portName",
+ prepare: func(tc *v1alpha1.TidbCluster, _ *fakeIndexers) {
+ tc.Spec.TiDB.Service = &v1alpha1.TiDBServiceSpec{
+ ServiceSpec: v1alpha1.ServiceSpec{
+ Type: corev1.ServiceTypeClusterIP,
+ PortName: pointer.StringPtr("mysql-tidb"),
+ },
+ }
+ },
+ expectFn: func(g *GomegaWithT, err error, svc *corev1.Service) {
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(svc).NotTo(BeNil())
+ g.Expect(svc.Spec.Ports[0].Name).To(Equal("mysql-tidb"))
+ },
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -738,7 +754,6 @@ type fakeIndexers struct {
tc cache.Indexer
svc cache.Indexer
eps cache.Indexer
- csr cache.Indexer
secret cache.Indexer
set cache.Indexer
}
@@ -751,13 +766,10 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet
svcInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Services()
epsInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Endpoints()
podInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Pods()
- csrInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Certificates().V1beta1().CertificateSigningRequests()
secretInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().Secrets()
cmInformer := kubeinformers.NewSharedInformerFactory(kubeCli, 0).Core().V1().ConfigMaps()
setControl := controller.NewFakeStatefulSetControl(setInformer, tcInformer)
svcControl := controller.NewFakeServiceControl(svcInformer, epsInformer, tcInformer)
- secControl := controller.NewFakeSecretControl(kubeCli, secretInformer.Lister())
- certControl := controller.NewFakeCertControl(kubeCli, csrInformer.Lister(), secControl)
genericControl := controller.NewFakeGenericControl()
tidbUpgrader := NewFakeTiDBUpgrader()
tidbFailover := NewFakeTiDBFailover()
@@ -768,7 +780,6 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet
svcControl,
tidbControl,
controller.NewTypedControl(genericControl),
- certControl,
setInformer.Lister(),
svcInformer.Lister(),
podInformer.Lister(),
@@ -783,7 +794,6 @@ func newFakeTiDBMemberManager() (*tidbMemberManager, *controller.FakeStatefulSet
tc: tcInformer.Informer().GetIndexer(),
svc: svcInformer.Informer().GetIndexer(),
eps: epsInformer.Informer().GetIndexer(),
- csr: csrInformer.Informer().GetIndexer(),
secret: secretInformer.Informer().GetIndexer(),
set: setInformer.Informer().GetIndexer(),
}
@@ -1612,13 +1622,13 @@ func TestGetTiDBConfigMap(t *testing.T) {
Namespace: "ns",
},
Spec: v1alpha1.TidbClusterSpec{
- EnableTLSCluster: pointer.BoolPtr(true),
+ TLSCluster: &v1alpha1.TLSCluster{Enabled: true},
TiDB: v1alpha1.TiDBSpec{
ComponentSpec: v1alpha1.ComponentSpec{
ConfigUpdateStrategy: &updateStrategy,
},
- EnableTLSClient: pointer.BoolPtr(true),
- Config: &v1alpha1.TiDBConfig{},
+ TLSClient: &v1alpha1.TiDBTLSClient{Enabled: true},
+ Config: &v1alpha1.TiDBConfig{},
},
},
},
@@ -1650,12 +1660,12 @@ func TestGetTiDBConfigMap(t *testing.T) {
Data: map[string]string{
"startup-script": "",
"config-file": `[security]
- ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- ssl-cert = "/var/lib/tidb-server-tls/cert"
- ssl-key = "/var/lib/tidb-server-tls/key"
- cluster-ssl-ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- cluster-ssl-cert = "/var/lib/tidb-tls/cert"
- cluster-ssl-key = "/var/lib/tidb-tls/key"
+ ssl-ca = "/var/lib/tidb-server-tls/ca.crt"
+ ssl-cert = "/var/lib/tidb-server-tls/tls.crt"
+ ssl-key = "/var/lib/tidb-server-tls/tls.key"
+ cluster-ssl-ca = "/var/lib/tidb-tls/ca.crt"
+ cluster-ssl-cert = "/var/lib/tidb-tls/tls.crt"
+ cluster-ssl-key = "/var/lib/tidb-tls/tls.key"
`,
},
},
@@ -1703,6 +1713,8 @@ func TestTiDBMemberManagerScaleToZeroReplica(t *testing.T) {
t.Log(test.name)
tc := newTidbClusterForTiDB()
+ tc.Spec.TiDB.MaxFailoverCount = pointer.Int32Ptr(3)
+ tc.Spec.TiKV.MaxFailoverCount = pointer.Int32Ptr(3)
tc.Status.TiKV.Stores = map[string]v1alpha1.TiKVStore{
"tikv-0": {PodName: "tikv-0", State: v1alpha1.TiKVStateUp},
}
diff --git a/pkg/manager/member/tidb_upgrader.go b/pkg/manager/member/tidb_upgrader.go
index 10c41e8422..8a3dd4db2a 100644
--- a/pkg/manager/member/tidb_upgrader.go
+++ b/pkg/manager/member/tidb_upgrader.go
@@ -19,7 +19,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/controller"
apps "k8s.io/api/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
type tidbUpgrader struct {
@@ -70,7 +70,7 @@ func (tdu *tidbUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Stateful
// If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading tidb.
// Therefore, in the production environment, we should try to avoid modifying the tidb statefulset update strategy directly.
newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy
- glog.Warningf("tidbcluster: [%s/%s] tidb statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
+ klog.Warningf("tidbcluster: [%s/%s] tidb statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
return nil
}
diff --git a/pkg/manager/member/tiflash_failover.go b/pkg/manager/member/tiflash_failover.go
new file mode 100644
index 0000000000..08ffa919dd
--- /dev/null
+++ b/pkg/manager/member/tiflash_failover.go
@@ -0,0 +1,53 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package member
+
+import (
+ "time"
+
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+)
+
+type tiflashFailover struct {
+ tiflashFailoverPeriod time.Duration
+}
+
+// NewTiFlashFailover returns a tiflash Failover
+func NewTiFlashFailover(tiflashFailoverPeriod time.Duration) Failover {
+ return &tiflashFailover{tiflashFailoverPeriod}
+}
+
+// TODO: Finish the failover logic
+func (tff *tiflashFailover) Failover(tc *v1alpha1.TidbCluster) error {
+ return nil
+}
+
+func (tff *tiflashFailover) Recover(_ *v1alpha1.TidbCluster) {
+ // Do nothing now
+}
+
+type fakeTiFlashFailover struct{}
+
+// NewFakeTiFlashFailover returns a fake Failover
+func NewFakeTiFlashFailover() Failover {
+ return &fakeTiFlashFailover{}
+}
+
+func (ftff *fakeTiFlashFailover) Failover(_ *v1alpha1.TidbCluster) error {
+ return nil
+}
+
+func (ftff *fakeTiFlashFailover) Recover(_ *v1alpha1.TidbCluster) {
+ return
+}
diff --git a/pkg/manager/member/tiflash_member_manager.go b/pkg/manager/member/tiflash_member_manager.go
new file mode 100644
index 0000000000..a3ce2b9433
--- /dev/null
+++ b/pkg/manager/member/tiflash_member_manager.go
@@ -0,0 +1,855 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package member
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/pingcap/kvproto/pkg/metapb"
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/controller"
+ "github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/manager"
+ "github.com/pingcap/tidb-operator/pkg/pdapi"
+ "github.com/pingcap/tidb-operator/pkg/util"
+ apps "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/apimachinery/pkg/util/uuid"
+ v1 "k8s.io/client-go/listers/apps/v1"
+ corelisters "k8s.io/client-go/listers/core/v1"
+ "k8s.io/klog"
+)
+
+const (
+ // tiflashClusterCertPath is where the cert for inter-cluster communication is stored (if any)
+ tiflashClusterCertPath = "/var/lib/tiflash-tls"
+
+ // TODO: find a better way to identify the stores that are managed by this operator's TiFlash
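+ // The pattern matches addresses like "<cluster>-tiflash-<ordinal>.<cluster>-tiflash-peer.<namespace>.svc:<port>", i.e. stores created by this operator.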
+ tiflashStoreLimitPattern = `%s-tiflash-\d+\.%s-tiflash-peer\.%s\.svc\:\d+`
+)
+
+// tiflashMemberManager implements manager.Manager.
+type tiflashMemberManager struct {
+ setControl controller.StatefulSetControlInterface
+ svcControl controller.ServiceControlInterface
+ pdControl pdapi.PDControlInterface
+ typedControl controller.TypedControlInterface
+ setLister v1.StatefulSetLister
+ svcLister corelisters.ServiceLister
+ podLister corelisters.PodLister
+ nodeLister corelisters.NodeLister
+ autoFailover bool
+ tiflashFailover Failover
+ tiflashScaler Scaler
+ tiflashUpgrader Upgrader
+ tiflashStatefulSetIsUpgradingFn func(corelisters.PodLister, pdapi.PDControlInterface, *apps.StatefulSet, *v1alpha1.TidbCluster) (bool, error)
+}
+
+// NewTiFlashMemberManager returns a *tiflashMemberManager
+func NewTiFlashMemberManager(
+ pdControl pdapi.PDControlInterface,
+ setControl controller.StatefulSetControlInterface,
+ svcControl controller.ServiceControlInterface,
+ typedControl controller.TypedControlInterface,
+ setLister v1.StatefulSetLister,
+ svcLister corelisters.ServiceLister,
+ podLister corelisters.PodLister,
+ nodeLister corelisters.NodeLister,
+ autoFailover bool,
+ tiflashFailover Failover,
+ tiflashScaler Scaler,
+ tiflashUpgrader Upgrader) manager.Manager {
+ kvmm := tiflashMemberManager{
+ pdControl: pdControl,
+ podLister: podLister,
+ nodeLister: nodeLister,
+ setControl: setControl,
+ svcControl: svcControl,
+ typedControl: typedControl,
+ setLister: setLister,
+ svcLister: svcLister,
+ autoFailover: autoFailover,
+ tiflashFailover: tiflashFailover,
+ tiflashScaler: tiflashScaler,
+ tiflashUpgrader: tiflashUpgrader,
+ }
+ kvmm.tiflashStatefulSetIsUpgradingFn = tiflashStatefulSetIsUpgrading
+ return &kvmm
+}
+
+// Sync fulfills the manager.Manager interface
+func (tfmm *tiflashMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
+ if tc.Spec.TiFlash == nil {
+ return nil
+ }
+
+ ns := tc.GetNamespace()
+ tcName := tc.GetName()
+
+ if !tc.PDIsAvailable() {
+ return controller.RequeueErrorf("TidbCluster: [%s/%s], waiting for PD cluster running", ns, tcName)
+ }
+
+ // Sync TiFlash Headless Service
+ if err := tfmm.syncHeadlessService(tc); err != nil {
+ return err
+ }
+
+ return tfmm.syncStatefulSet(tc)
+}
+
+func (tfmm *tiflashMemberManager) syncHeadlessService(tc *v1alpha1.TidbCluster) error {
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tiflash cluster %s/%s is paused, skip syncing for tiflash service", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
+ ns := tc.GetNamespace()
+ tcName := tc.GetName()
+
+ newSvc := getNewHeadlessService(tc)
+ oldSvcTmp, err := tfmm.svcLister.Services(ns).Get(controller.TiFlashPeerMemberName(tcName))
+ if errors.IsNotFound(err) {
+ err = controller.SetServiceLastAppliedConfigAnnotation(newSvc)
+ if err != nil {
+ return err
+ }
+ return tfmm.svcControl.CreateService(tc, newSvc)
+ }
+ if err != nil {
+ return err
+ }
+
+ oldSvc := oldSvcTmp.DeepCopy()
+
+ equal, err := controller.ServiceEqual(newSvc, oldSvc)
+ if err != nil {
+ return err
+ }
+ if !equal {
+ svc := *oldSvc
+ svc.Spec = newSvc.Spec
+ err = controller.SetServiceLastAppliedConfigAnnotation(newSvc)
+ if err != nil {
+ return err
+ }
+ _, err = tfmm.svcControl.UpdateService(tc, &svc)
+ return err
+ }
+
+ return nil
+}
+
+func (tfmm *tiflashMemberManager) syncStatefulSet(tc *v1alpha1.TidbCluster) error {
+ ns := tc.GetNamespace()
+ tcName := tc.GetName()
+
+ oldSetTmp, err := tfmm.setLister.StatefulSets(ns).Get(controller.TiFlashMemberName(tcName))
+ if err != nil && !errors.IsNotFound(err) {
+ return err
+ }
+ setNotExist := errors.IsNotFound(err)
+
+ oldSet := oldSetTmp.DeepCopy()
+
+ if err := tfmm.syncTidbClusterStatus(tc, oldSet); err != nil {
+ return err
+ }
+
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tiflash cluster %s/%s is paused, skip syncing for tiflash statefulset", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
+ cm, err := tfmm.syncConfigMap(tc, oldSet)
+ if err != nil {
+ return err
+ }
+
+ newSet, err := getNewStatefulSet(tc, cm)
+ if err != nil {
+ return err
+ }
+ if setNotExist {
+ err = SetStatefulSetLastAppliedConfigAnnotation(newSet)
+ if err != nil {
+ return err
+ }
+ err = tfmm.setControl.CreateStatefulSet(tc, newSet)
+ if err != nil {
+ return err
+ }
+ tc.Status.TiFlash.StatefulSet = &apps.StatefulSetStatus{}
+ return nil
+ }
+
+ if _, err := tfmm.setStoreLabelsForTiFlash(tc); err != nil {
+ return err
+ }
+
+ if !templateEqual(newSet, oldSet) || tc.Status.TiFlash.Phase == v1alpha1.UpgradePhase {
+ if err := tfmm.tiflashUpgrader.Upgrade(tc, oldSet, newSet); err != nil {
+ return err
+ }
+ }
+
+ if err := tfmm.tiflashScaler.Scale(tc, oldSet, newSet); err != nil {
+ return err
+ }
+
+ if tfmm.autoFailover && tc.Spec.TiFlash.MaxFailoverCount != nil {
+ if tc.TiFlashAllPodsStarted() && !tc.TiFlashAllStoresReady() {
+ if err := tfmm.tiflashFailover.Failover(tc); err != nil {
+ return err
+ }
+ }
+ }
+
+ return updateStatefulSet(tfmm.setControl, tc, newSet, oldSet)
+}
+
+func (tfmm *tiflashMemberManager) syncConfigMap(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) (*corev1.ConfigMap, error) {
+ newCm, err := getTiFlashConfigMap(tc)
+ if err != nil {
+ return nil, err
+ }
+ if set != nil && tc.BaseTiFlashSpec().ConfigUpdateStrategy() == v1alpha1.ConfigUpdateStrategyInPlace {
+ inUseName := FindConfigMapVolume(&set.Spec.Template.Spec, func(name string) bool {
+ return strings.HasPrefix(name, controller.TiFlashMemberName(tc.Name))
+ })
+ if inUseName != "" {
+ newCm.Name = inUseName
+ }
+ }
+
+ return tfmm.typedControl.CreateOrUpdateConfigMap(tc, newCm)
+}
+
+func getNewHeadlessService(tc *v1alpha1.TidbCluster) *corev1.Service {
+ ns := tc.Namespace
+ tcName := tc.Name
+ instanceName := tc.GetInstanceName()
+ svcName := controller.TiFlashPeerMemberName(tcName)
+ svcLabel := label.New().Instance(instanceName).TiFlash().Labels()
+
+ svc := corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: svcName,
+ Namespace: ns,
+ Labels: svcLabel,
+ OwnerReferences: []metav1.OwnerReference{controller.GetOwnerRef(tc)},
+ },
+ Spec: corev1.ServiceSpec{
+ ClusterIP: "None",
+ Ports: []corev1.ServicePort{
+ {
+ Name: "tiflash",
+ Port: 3930,
+ TargetPort: intstr.FromInt(int(3930)),
+ Protocol: corev1.ProtocolTCP,
+ },
+ {
+ Name: "proxy",
+ Port: 20170,
+ TargetPort: intstr.FromInt(int(20170)),
+ Protocol: corev1.ProtocolTCP,
+ },
+ },
+ Selector: svcLabel,
+ PublishNotReadyAddresses: true,
+ },
+ }
+ return &svc
+}
+
+func getNewStatefulSet(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap) (*apps.StatefulSet, error) {
+ ns := tc.GetNamespace()
+ tcName := tc.GetName()
+ baseTiFlashSpec := tc.BaseTiFlashSpec()
+ spec := tc.Spec.TiFlash
+
+ tiflashConfigMap := controller.MemberConfigMapName(tc, v1alpha1.TiFlashMemberType)
+ if cm != nil {
+ tiflashConfigMap = cm.Name
+ }
+
+ // This should not happen as we have validation for this field
+ if len(spec.StorageClaims) < 1 {
+ return nil, fmt.Errorf("storageClaims should be configured at least one item for tiflash, tidbcluster %s/%s", tc.Namespace, tc.Name)
+ }
+ pvcs, err := flashVolumeClaimTemplate(tc.Spec.TiFlash.StorageClaims)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse storage request for tiflash.StorageClaims, tidbcluster %s/%s, error: %v", tc.Namespace, tc.Name, err)
+ }
+ annMount, annVolume := annotationsMountVolume()
+ volMounts := []corev1.VolumeMount{
+ annMount,
+ }
+ for k := range spec.StorageClaims {
+ volMounts = append(volMounts, corev1.VolumeMount{
+ Name: fmt.Sprintf("data%d", k), MountPath: fmt.Sprintf("/data%d", k)})
+ }
+
+ // TiFlash does not support TLS yet
+ // if tc.IsTLSClusterEnabled() {
+ // volMounts = append(volMounts, corev1.VolumeMount{
+ // Name: "tiflash-tls", ReadOnly: true, MountPath: "/var/lib/tiflash-tls",
+ // })
+ // }
+
+ vols := []corev1.Volume{
+ annVolume,
+ {Name: "config", VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: tiflashConfigMap,
+ },
+ }},
+ },
+ }
+
+ // if tc.IsTLSClusterEnabled() {
+ // vols = append(vols, corev1.Volume{
+ // Name: "tiflash-tls", VolumeSource: corev1.VolumeSource{
+ // Secret: &corev1.SecretVolumeSource{
+ // SecretName: util.ClusterTLSSecretName(tc.Name, label.TiFlashLabelVal),
+ // },
+ // },
+ // })
+ // }
+
+ sysctls := "sysctl -w"
+ var initContainers []corev1.Container
+ if baseTiFlashSpec.Annotations() != nil {
+ init, ok := baseTiFlashSpec.Annotations()[label.AnnSysctlInit]
+ if ok && (init == label.AnnSysctlInitVal) {
+ if baseTiFlashSpec.PodSecurityContext() != nil && len(baseTiFlashSpec.PodSecurityContext().Sysctls) > 0 {
+ for _, sysctl := range baseTiFlashSpec.PodSecurityContext().Sysctls {
+ sysctls = sysctls + fmt.Sprintf(" %s=%s", sysctl.Name, sysctl.Value)
+ }
+ privileged := true
+ initContainers = append(initContainers, corev1.Container{
+ Name: "init",
+ Image: tc.HelperImage(),
+ Command: []string{
+ "sh",
+ "-c",
+ sysctls,
+ },
+ SecurityContext: &corev1.SecurityContext{
+ Privileged: &privileged,
+ },
+ })
+ }
+ }
+ }
+ // The init container is only used when allowed-unsafe-sysctls cannot be
+ // enabled for kubelet, so clear the sysctls in the StatefulSet
+ // SecurityContext if the init container is enabled
+ podSecurityContext := baseTiFlashSpec.PodSecurityContext().DeepCopy()
+ if len(initContainers) > 0 {
+ podSecurityContext.Sysctls = []corev1.Sysctl{}
+ }
+
+ // Append an init container that initializes the config files
+ initVolMounts := []corev1.VolumeMount{
+ {Name: "data0", MountPath: "/data0"},
+ {Name: "config", ReadOnly: true, MountPath: "/etc/tiflash"},
+ }
+ initEnv := []corev1.EnvVar{
+ {
+ Name: "POD_NAME",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.name",
+ },
+ },
+ },
+ }
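+	// Render per-pod configs by substituting the pod ordinal (POD_NUM) into the templated TiFlash and proxy config files.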
+ initContainers = append(initContainers, corev1.Container{
+ Name: "init",
+ Image: tc.HelperImage(),
+ Command: []string{
+ "sh",
+ "-c",
+ "set -ex;ordinal=`echo ${POD_NAME} | awk -F- '{print $NF}'`;sed s/POD_NUM/${ordinal}/g /etc/tiflash/config_templ.toml > /data0/config.toml;sed s/POD_NUM/${ordinal}/g /etc/tiflash/proxy_templ.toml > /data0/proxy.toml",
+ },
+ Env: initEnv,
+ VolumeMounts: initVolMounts,
+ })
+
+ tiflashLabel := labelTiFlash(tc)
+ setName := controller.TiFlashMemberName(tcName)
+ podAnnotations := CombineAnnotations(controller.AnnProm(8234), baseTiFlashSpec.Annotations())
+ stsAnnotations := getStsAnnotations(tc, label.TiFlashLabelVal)
+ capacity := controller.TiKVCapacity(tc.Spec.TiFlash.Limits)
+ headlessSvcName := controller.TiFlashPeerMemberName(tcName)
+
+ env := []corev1.EnvVar{
+ {
+ Name: "NAMESPACE",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.namespace",
+ },
+ },
+ },
+ {
+ Name: "CLUSTER_NAME",
+ Value: tcName,
+ },
+ {
+ Name: "HEADLESS_SERVICE_NAME",
+ Value: headlessSvcName,
+ },
+ {
+ Name: "CAPACITY",
+ Value: capacity,
+ },
+ {
+ Name: "TZ",
+ Value: tc.Spec.Timezone,
+ },
+ }
+ tiflashContainer := corev1.Container{
+ Name: v1alpha1.TiFlashMemberType.String(),
+ Image: tc.TiFlashImage(),
+ ImagePullPolicy: baseTiFlashSpec.ImagePullPolicy(),
+ Command: []string{"/bin/sh", "-c", "/tiflash/tiflash server --config-file /data0/config.toml"},
+ SecurityContext: &corev1.SecurityContext{
+ Privileged: tc.TiFlashContainerPrivilege(),
+ },
+ Ports: []corev1.ContainerPort{
+ {
+ Name: "tiflash",
+ ContainerPort: int32(3930),
+ Protocol: corev1.ProtocolTCP,
+ },
+ {
+ Name: "proxy",
+ ContainerPort: int32(20170),
+ Protocol: corev1.ProtocolTCP,
+ },
+ {
+ Name: "tcp",
+ ContainerPort: int32(9000),
+ Protocol: corev1.ProtocolTCP,
+ },
+ {
+ Name: "http",
+ ContainerPort: int32(8123),
+ Protocol: corev1.ProtocolTCP,
+ },
+ {
+ Name: "internal",
+ ContainerPort: int32(9009),
+ Protocol: corev1.ProtocolTCP,
+ },
+ {
+ Name: "metrics",
+ ContainerPort: int32(8234),
+ Protocol: corev1.ProtocolTCP,
+ },
+ },
+ VolumeMounts: volMounts,
+ Resources: controller.ContainerResource(tc.Spec.TiFlash.ResourceRequirements),
+ }
+ podSpec := baseTiFlashSpec.BuildPodSpec()
+ if baseTiFlashSpec.HostNetwork() {
+ podSpec.DNSPolicy = corev1.DNSClusterFirstWithHostNet
+ env = append(env, corev1.EnvVar{
+ Name: "POD_NAME",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.name",
+ },
+ },
+ })
+ }
+ tiflashContainer.Env = util.AppendEnv(env, baseTiFlashSpec.Env())
+ podSpec.Volumes = vols
+ podSpec.SecurityContext = podSecurityContext
+ podSpec.InitContainers = initContainers
+ podSpec.Containers = []corev1.Container{tiflashContainer}
+ podSpec.Containers = append(podSpec.Containers, buildTiFlashSidecarContainers(tc)...)
+ podSpec.ServiceAccountName = tc.Spec.TiFlash.ServiceAccount
+
+ tiflashset := &apps.StatefulSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: setName,
+ Namespace: ns,
+ Labels: tiflashLabel.Labels(),
+ Annotations: stsAnnotations,
+ OwnerReferences: []metav1.OwnerReference{controller.GetOwnerRef(tc)},
+ },
+ Spec: apps.StatefulSetSpec{
+ Replicas: controller.Int32Ptr(tc.TiFlashStsDesiredReplicas()),
+ Selector: tiflashLabel.LabelSelector(),
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: tiflashLabel.Labels(),
+ Annotations: podAnnotations,
+ },
+ Spec: podSpec,
+ },
+ VolumeClaimTemplates: pvcs,
+ ServiceName: headlessSvcName,
+ PodManagementPolicy: apps.ParallelPodManagement,
+ UpdateStrategy: apps.StatefulSetUpdateStrategy{
+ Type: apps.RollingUpdateStatefulSetStrategyType,
+ RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{
+ Partition: controller.Int32Ptr(tc.TiFlashStsDesiredReplicas()),
+ },
+ },
+ },
+ }
+ return tiflashset, nil
+}
+
+func flashVolumeClaimTemplate(storageClaims []v1alpha1.StorageClaim) ([]corev1.PersistentVolumeClaim, error) {
+ var pvcs []corev1.PersistentVolumeClaim
+ for k := range storageClaims {
+ storageRequest, err := controller.ParseStorageRequest(storageClaims[k].Resources.Requests)
+ if err != nil {
+ return nil, err
+ }
+ pvcs = append(pvcs, corev1.PersistentVolumeClaim{
+ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("data%d", k)},
+ Spec: corev1.PersistentVolumeClaimSpec{
+ AccessModes: []corev1.PersistentVolumeAccessMode{
+ corev1.ReadWriteOnce,
+ },
+ StorageClassName: storageClaims[k].StorageClassName,
+ Resources: storageRequest,
+ },
+ })
+ }
+ return pvcs, nil
+}
+
+func getTiFlashConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) {
+ config := tc.Spec.TiFlash.Config.DeepCopy()
+ if config == nil {
+ config = &v1alpha1.TiFlashConfig{}
+ }
+ setTiFlashConfigDefault(config, tc.Name, tc.Namespace)
+
+ // override CA if tls enabled
+ // if tc.IsTLSClusterEnabled() {
+ // if config.Security == nil {
+ // config.Security = &v1alpha1.TiFlashSecurityConfig{}
+ // }
+ // config.Security.CAPath = path.Join(tiflashClusterCertPath, tlsSecretRootCAKey)
+ // config.Security.CertPath = path.Join(tiflashClusterCertPath, corev1.TLSCertKey)
+ // config.Security.KeyPath = path.Join(tiflashClusterCertPath, corev1.TLSPrivateKeyKey)
+ // }
+
+ configText, err := MarshalTOML(config.CommonConfig)
+ if err != nil {
+ return nil, err
+ }
+ proxyText, err := MarshalTOML(config.ProxyConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ instanceName := tc.GetInstanceName()
+ tiflashLabel := label.New().Instance(instanceName).TiFlash().Labels()
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: controller.TiFlashMemberName(tc.Name),
+ Namespace: tc.Namespace,
+ Labels: tiflashLabel,
+ OwnerReferences: []metav1.OwnerReference{controller.GetOwnerRef(tc)},
+ },
+ Data: map[string]string{
+ "config_templ.toml": string(configText),
+ "proxy_templ.toml": string(proxyText),
+ },
+ }
+
+ if tc.BaseTiFlashSpec().ConfigUpdateStrategy() == v1alpha1.ConfigUpdateStrategyRollingUpdate {
+ if err := AddConfigMapDigestSuffix(cm); err != nil {
+ return nil, err
+ }
+ }
+
+ return cm, nil
+}
+
+func labelTiFlash(tc *v1alpha1.TidbCluster) label.Label {
+ instanceName := tc.GetInstanceName()
+ return label.New().Instance(instanceName).TiFlash()
+}
+
+func (tfmm *tiflashMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) error {
+ if set == nil {
+ // skip if not created yet
+ return nil
+ }
+ tc.Status.TiFlash.StatefulSet = &set.Status
+ upgrading, err := tfmm.tiflashStatefulSetIsUpgradingFn(tfmm.podLister, tfmm.pdControl, set, tc)
+ if err != nil {
+ return err
+ }
+ if upgrading && tc.Status.PD.Phase != v1alpha1.UpgradePhase {
+ tc.Status.TiFlash.Phase = v1alpha1.UpgradePhase
+ } else {
+ tc.Status.TiFlash.Phase = v1alpha1.NormalPhase
+ }
+
+ previousStores := tc.Status.TiFlash.Stores
+ stores := map[string]v1alpha1.TiKVStore{}
+ tombstoneStores := map[string]v1alpha1.TiKVStore{}
+
+ pdCli := controller.GetPDClient(tfmm.pdControl, tc)
+ // This only returns Up/Down/Offline stores
+ storesInfo, err := pdCli.GetStores()
+ if err != nil {
+ tc.Status.TiFlash.Synced = false
+ return err
+ }
+
+ pattern, err := regexp.Compile(fmt.Sprintf(tiflashStoreLimitPattern, tc.Name, tc.Name, tc.Namespace))
+ if err != nil {
+ return err
+ }
+ for _, store := range storesInfo.Stores {
+ // In theory, an external TiFlash store can join the cluster, and the operator only manages the internal ones.
+ // So we check the store address to make sure the store is owned by this cluster.
+ if store.Store != nil && !pattern.Match([]byte(store.Store.Address)) {
+ continue
+ }
+ status := tfmm.getTiFlashStore(store)
+ if status == nil {
+ continue
+ }
+ // avoid LastHeartbeatTime being overwritten with zero time when PD loses LastHeartbeatTime
+ if status.LastHeartbeatTime.IsZero() {
+ if oldStatus, ok := previousStores[status.ID]; ok {
+ klog.V(4).Infof("the pod:%s's store LastHeartbeatTime is zero,so will keep in %v", status.PodName, oldStatus.LastHeartbeatTime)
+ status.LastHeartbeatTime = oldStatus.LastHeartbeatTime
+ }
+ }
+
+ oldStore, exist := previousStores[status.ID]
+
+ status.LastTransitionTime = metav1.Now()
+ if exist && status.State == oldStore.State {
+ status.LastTransitionTime = oldStore.LastTransitionTime
+ }
+
+ stores[status.ID] = *status
+ }
+
+ // This returns all Tombstone stores
+ tombstoneStoresInfo, err := pdCli.GetTombStoneStores()
+ if err != nil {
+ tc.Status.TiFlash.Synced = false
+ return err
+ }
+ for _, store := range tombstoneStoresInfo.Stores {
+ status := tfmm.getTiFlashStore(store)
+ if status == nil {
+ continue
+ }
+ tombstoneStores[status.ID] = *status
+ }
+
+ tc.Status.TiFlash.Synced = true
+ tc.Status.TiFlash.Stores = stores
+ tc.Status.TiFlash.TombstoneStores = tombstoneStores
+ tc.Status.TiFlash.Image = ""
+ c := filterContainer(set, "tiflash")
+ if c != nil {
+ tc.Status.TiFlash.Image = c.Image
+ }
+ return nil
+}
+
+func (tfmm *tiflashMemberManager) getTiFlashStore(store *pdapi.StoreInfo) *v1alpha1.TiKVStore {
+ if store.Store == nil || store.Status == nil {
+ return nil
+ }
+ storeID := fmt.Sprintf("%d", store.Store.GetId())
+ ip := strings.Split(store.Store.GetAddress(), ":")[0]
+ podName := strings.Split(ip, ".")[0]
+
+ return &v1alpha1.TiKVStore{
+ ID: storeID,
+ PodName: podName,
+ IP: ip,
+ LeaderCount: int32(store.Status.LeaderCount),
+ State: store.Store.StateName,
+ LastHeartbeatTime: metav1.Time{Time: store.Status.LastHeartbeatTS},
+ }
+}
+
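+// setStoreLabelsForTiFlash copies the PD location labels from each TiFlash pod's node onto its store and returns how many stores were updated.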
+func (tfmm *tiflashMemberManager) setStoreLabelsForTiFlash(tc *v1alpha1.TidbCluster) (int, error) {
+ ns := tc.GetNamespace()
+ // for unit test
+ setCount := 0
+
+ pdCli := controller.GetPDClient(tfmm.pdControl, tc)
+ storesInfo, err := pdCli.GetStores()
+ if err != nil {
+ return setCount, err
+ }
+
+ config, err := pdCli.GetConfig()
+ if err != nil {
+ return setCount, err
+ }
+
+ locationLabels := []string(config.Replication.LocationLabels)
+ if locationLabels == nil {
+ return setCount, nil
+ }
+
+ pattern, err := regexp.Compile(fmt.Sprintf(tiflashStoreLimitPattern, tc.Name, tc.Name, tc.Namespace))
+ if err != nil {
+ return -1, err
+ }
+ for _, store := range storesInfo.Stores {
+ // In theory, an external TiFlash store can join the cluster, and the operator only manages the internal ones.
+ // So we check the store address to make sure the store is owned by this cluster.
+ if store.Store != nil && !pattern.Match([]byte(store.Store.Address)) {
+ continue
+ }
+ status := tfmm.getTiFlashStore(store)
+ if status == nil {
+ continue
+ }
+ podName := status.PodName
+
+ pod, err := tfmm.podLister.Pods(ns).Get(podName)
+ if err != nil {
+ return setCount, err
+ }
+
+ nodeName := pod.Spec.NodeName
+ ls, err := tfmm.getNodeLabels(nodeName, locationLabels)
+ if err != nil || len(ls) == 0 {
+ klog.Warningf("node: [%s] has no node labels, skipping set store labels for Pod: [%s/%s]", nodeName, ns, podName)
+ continue
+ }
+
+ if !tfmm.storeLabelsEqualNodeLabels(store.Store.Labels, ls) {
+ set, err := pdCli.SetStoreLabels(store.Store.Id, ls)
+ if err != nil {
+ klog.Warningf("failed to set pod: [%s/%s]'s store labels: %v", ns, podName, ls)
+ continue
+ }
+ if set {
+ setCount++
+ klog.Infof("pod: [%s/%s] set labels: %v successfully", ns, podName, ls)
+ }
+ }
+ }
+
+ return setCount, nil
+}
+
+func (tfmm *tiflashMemberManager) getNodeLabels(nodeName string, storeLabels []string) (map[string]string, error) {
+ node, err := tfmm.nodeLister.Get(nodeName)
+ if err != nil {
+ return nil, err
+ }
+ labels := map[string]string{}
+ ls := node.GetLabels()
+ for _, storeLabel := range storeLabels {
+ if value, found := ls[storeLabel]; found {
+ labels[storeLabel] = value
+ continue
+ }
+
+ // TODO: after PD supports store labels containing the slash character, this code should be deleted
+ if storeLabel == "host" {
+ if host, found := ls[corev1.LabelHostname]; found {
+ labels[storeLabel] = host
+ }
+ }
+
+ }
+ return labels, nil
+}
+
+// storeLabelsEqualNodeLabels compares store labels with node labels.
+// For historical reasons, PD stores TiFlash labels as []*StoreLabel, which is a slice of key-value pairs.
+func (tfmm *tiflashMemberManager) storeLabelsEqualNodeLabels(storeLabels []*metapb.StoreLabel, nodeLabels map[string]string) bool {
+ ls := map[string]string{}
+ for _, label := range storeLabels {
+ key := label.GetKey()
+ if _, ok := nodeLabels[key]; ok {
+ val := label.GetValue()
+ ls[key] = val
+ }
+ }
+ return reflect.DeepEqual(ls, nodeLabels)
+}
+
+func tiflashStatefulSetIsUpgrading(podLister corelisters.PodLister, pdControl pdapi.PDControlInterface, set *apps.StatefulSet, tc *v1alpha1.TidbCluster) (bool, error) {
+ if statefulSetIsUpgrading(set) {
+ return true, nil
+ }
+ instanceName := tc.GetInstanceName()
+ selector, err := label.New().Instance(instanceName).TiFlash().Selector()
+ if err != nil {
+ return false, err
+ }
+ tiflashPods, err := podLister.Pods(tc.GetNamespace()).List(selector)
+ if err != nil {
+ return false, err
+ }
+ for _, pod := range tiflashPods {
+ revisionHash, exist := pod.Labels[apps.ControllerRevisionHashLabelKey]
+ if !exist {
+ return false, nil
+ }
+ if revisionHash != tc.Status.TiFlash.StatefulSet.UpdateRevision {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+type FakeTiFlashMemberManager struct {
+ err error
+}
+
+func NewFakeTiFlashMemberManager() *FakeTiFlashMemberManager {
+ return &FakeTiFlashMemberManager{}
+}
+
+func (ftmm *FakeTiFlashMemberManager) SetSyncError(err error) {
+ ftmm.err = err
+}
+
+func (ftmm *FakeTiFlashMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
+ if ftmm.err != nil {
+ return ftmm.err
+ }
+ if len(tc.Status.TiFlash.Stores) != 0 {
+ // simulate status update
+ tc.Status.ClusterID = string(uuid.NewUUID())
+ }
+ return nil
+}
diff --git a/pkg/manager/member/tiflash_scaler.go b/pkg/manager/member/tiflash_scaler.go
new file mode 100644
index 0000000000..0b3dff0024
--- /dev/null
+++ b/pkg/manager/member/tiflash_scaler.go
@@ -0,0 +1,93 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package member
+
+import (
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/controller"
+ "github.com/pingcap/tidb-operator/pkg/pdapi"
+ apps "k8s.io/api/apps/v1"
+ corelisters "k8s.io/client-go/listers/core/v1"
+)
+
+type tiflashScaler struct {
+ generalScaler
+ podLister corelisters.PodLister
+}
+
+// NewTiFlashScaler returns a tiflash Scaler
+func NewTiFlashScaler(pdControl pdapi.PDControlInterface,
+ pvcLister corelisters.PersistentVolumeClaimLister,
+ pvcControl controller.PVCControlInterface,
+ podLister corelisters.PodLister) Scaler {
+ return &tiflashScaler{generalScaler{pdControl, pvcLister, pvcControl}, podLister}
+}
+
+// TODO: Finish the scaling logic
+func (tfs *tiflashScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
+ scaling, _, _, _ := scaleOne(oldSet, newSet)
+ if scaling > 0 {
+ return tfs.ScaleOut(tc, oldSet, newSet)
+ } else if scaling < 0 {
+ return tfs.ScaleIn(tc, oldSet, newSet)
+ }
+ // we only sync the auto-scaler annotations once scaling has finished
+ return tfs.SyncAutoScalerAnn(tc, oldSet)
+}
+
+func (tfs *tiflashScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
+
+ return nil
+}
+
+func (tfs *tiflashScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
+
+ return nil
+}
+
+// SyncAutoScalerAnn reclaims the auto-scaling-out slots if the target pods no longer exist
+func (tfs *tiflashScaler) SyncAutoScalerAnn(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet) error {
+
+ return nil
+}
+
+type fakeTiFlashScaler struct{}
+
+// NewFakeTiFlashScaler returns a fake tiflash Scaler
+func NewFakeTiFlashScaler() Scaler {
+ return &fakeTiFlashScaler{}
+}
+
+func (fsd *fakeTiFlashScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
+ if *newSet.Spec.Replicas > *oldSet.Spec.Replicas {
+ return fsd.ScaleOut(tc, oldSet, newSet)
+ } else if *newSet.Spec.Replicas < *oldSet.Spec.Replicas {
+ return fsd.ScaleIn(tc, oldSet, newSet)
+ }
+ return nil
+}
+
+func (fsd *fakeTiFlashScaler) ScaleOut(_ *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
+ setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas+1, nil)
+ return nil
+}
+
+func (fsd *fakeTiFlashScaler) ScaleIn(_ *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
+ setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas-1, nil)
+ return nil
+}
+
+func (fsd *fakeTiFlashScaler) SyncAutoScalerAnn(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet) error {
+ return nil
+}
diff --git a/pkg/manager/member/tiflash_upgrader.go b/pkg/manager/member/tiflash_upgrader.go
new file mode 100644
index 0000000000..1d3ff7c824
--- /dev/null
+++ b/pkg/manager/member/tiflash_upgrader.go
@@ -0,0 +1,57 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package member
+
+import (
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/controller"
+ "github.com/pingcap/tidb-operator/pkg/pdapi"
+ apps "k8s.io/api/apps/v1"
+ corelisters "k8s.io/client-go/listers/core/v1"
+)
+
+type tiflashUpgrader struct {
+ pdControl pdapi.PDControlInterface
+ podControl controller.PodControlInterface
+ podLister corelisters.PodLister
+}
+
+// NewTiFlashUpgrader returns a tiflash Upgrader
+func NewTiFlashUpgrader(pdControl pdapi.PDControlInterface,
+ podControl controller.PodControlInterface,
+ podLister corelisters.PodLister) Upgrader {
+ return &tiflashUpgrader{
+ pdControl: pdControl,
+ podControl: podControl,
+ podLister: podLister,
+ }
+}
+
+// TODO: Finish the upgrade logic
+func (tku *tiflashUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
+
+ return nil
+}
+
+type fakeTiFlashUpgrader struct{}
+
+// NewFakeTiFlashUpgrader returns a fake tiflash upgrader
+func NewFakeTiFlashUpgrader() Upgrader {
+ return &fakeTiFlashUpgrader{}
+}
+
+func (tku *fakeTiFlashUpgrader) Upgrade(tc *v1alpha1.TidbCluster, _ *apps.StatefulSet, _ *apps.StatefulSet) error {
+ tc.Status.TiFlash.Phase = v1alpha1.UpgradePhase
+ return nil
+}
diff --git a/pkg/manager/member/tiflash_util.go b/pkg/manager/member/tiflash_util.go
new file mode 100644
index 0000000000..7e32223d92
--- /dev/null
+++ b/pkg/manager/member/tiflash_util.go
@@ -0,0 +1,419 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package member
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/controller"
+ corev1 "k8s.io/api/core/v1"
+)
+
+const (
+ defaultClusterLog = "/data0/logs/flash_cluster_manager.log"
+ defaultProxyLog = "/data0/logs/proxy.log"
+ defaultErrorLog = "/data0/logs/error.log"
+ defaultServerLog = "/data0/logs/server.log"
+)
+
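+// buildTiFlashSidecarContainers builds the log tailer sidecar containers that tail the TiFlash server, error, proxy and cluster-manager logs.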
+func buildTiFlashSidecarContainers(tc *v1alpha1.TidbCluster) []corev1.Container {
+ spec := tc.Spec.TiFlash
+ config := spec.Config.DeepCopy()
+ image := tc.HelperImage()
+ pullPolicy := tc.HelperImagePullPolicy()
+ var containers []corev1.Container
+ var resource corev1.ResourceRequirements
+ if spec.LogTailer != nil {
+ resource = controller.ContainerResource(spec.LogTailer.ResourceRequirements)
+ }
+ if config == nil {
+ config = &v1alpha1.TiFlashConfig{}
+ }
+ setTiFlashLogConfigDefault(config)
+ containers = append(containers, buildSidecarContainer("serverlog", config.CommonConfig.FlashLogger.ServerLog, image, pullPolicy, resource))
+ containers = append(containers, buildSidecarContainer("errorlog", config.CommonConfig.FlashLogger.ErrorLog, image, pullPolicy, resource))
+ containers = append(containers, buildSidecarContainer("proxylog", config.CommonConfig.Flash.FlashProxy.LogFile, image, pullPolicy, resource))
+ containers = append(containers, buildSidecarContainer("clusterlog", config.CommonConfig.Flash.FlashCluster.ClusterLog, image, pullPolicy, resource))
+ return containers
+}
+
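+// buildSidecarContainer builds a single log tailer container that touches and tails the given log file,
+// mounting the top-level directory of the log path as a volume when the path is nested (e.g. /data0/logs/server.log).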
+func buildSidecarContainer(name, path, image string,
+ pullPolicy corev1.PullPolicy,
+ resource corev1.ResourceRequirements) corev1.Container {
+ splitPath := strings.Split(path, string(os.PathSeparator))
+ // The log path should be at least /dir/base.log
+ if len(splitPath) >= 3 {
+ serverLogVolumeName := splitPath[1]
+ serverLogMountDir := "/" + serverLogVolumeName
+ return corev1.Container{
+ Name: name,
+ Image: image,
+ ImagePullPolicy: pullPolicy,
+ Resources: resource,
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: serverLogVolumeName, MountPath: serverLogMountDir},
+ },
+ Command: []string{
+ "sh",
+ "-c",
+ fmt.Sprintf("touch %s; tail -n0 -F %s;", path, path),
+ },
+ }
+ }
+ return corev1.Container{
+ Name: name,
+ Image: image,
+ ImagePullPolicy: pullPolicy,
+ Resources: resource,
+ Command: []string{
+ "sh",
+ "-c",
+ fmt.Sprintf("touch %s; tail -n0 -F %s;", path, path),
+ },
+ }
+}
+
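+// setTiFlashLogConfigDefault fills in the default log file paths used by the log tailer sidecars.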
+func setTiFlashLogConfigDefault(config *v1alpha1.TiFlashConfig) {
+ if config.CommonConfig == nil {
+ config.CommonConfig = &v1alpha1.CommonConfig{}
+ }
+ if config.CommonConfig.Flash == nil {
+ config.CommonConfig.Flash = &v1alpha1.Flash{}
+ }
+ if config.CommonConfig.Flash.FlashCluster == nil {
+ config.CommonConfig.Flash.FlashCluster = &v1alpha1.FlashCluster{}
+ }
+ if config.CommonConfig.Flash.FlashCluster.ClusterLog == "" {
+ config.CommonConfig.Flash.FlashCluster.ClusterLog = defaultClusterLog
+ }
+ if config.CommonConfig.Flash.FlashProxy == nil {
+ config.CommonConfig.Flash.FlashProxy = &v1alpha1.FlashProxy{}
+ }
+ if config.CommonConfig.Flash.FlashProxy.LogFile == "" {
+ config.CommonConfig.Flash.FlashProxy.LogFile = defaultProxyLog
+ }
+
+ if config.CommonConfig.FlashLogger == nil {
+ config.CommonConfig.FlashLogger = &v1alpha1.FlashLogger{}
+ }
+ if config.CommonConfig.FlashLogger.ErrorLog == "" {
+ config.CommonConfig.FlashLogger.ErrorLog = defaultErrorLog
+ }
+ if config.CommonConfig.FlashLogger.ServerLog == "" {
+ config.CommonConfig.FlashLogger.ServerLog = defaultServerLog
+ }
+}
+
+// setTiFlashConfigDefault sets default configs for TiFlash
+func setTiFlashConfigDefault(config *v1alpha1.TiFlashConfig, clusterName, ns string) {
+ if config.CommonConfig == nil {
+ config.CommonConfig = &v1alpha1.CommonConfig{}
+ }
+ setTiFlashCommonConfigDefault(config.CommonConfig, clusterName, ns)
+ if config.ProxyConfig == nil {
+ config.ProxyConfig = &v1alpha1.ProxyConfig{}
+ }
+ setTiFlashProxyConfigDefault(config.ProxyConfig, clusterName, ns)
+}
+
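+// setTiFlashProxyConfigDefault sets the default log level and engine address for the TiFlash proxy config.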
+func setTiFlashProxyConfigDefault(config *v1alpha1.ProxyConfig, clusterName, ns string) {
+ if config.LogLevel == "" {
+ config.LogLevel = "info"
+ }
+ if config.Server == nil {
+ config.Server = &v1alpha1.FlashServerConfig{}
+ }
+ if config.Server.EngineAddr == "" {
+ config.Server.EngineAddr = fmt.Sprintf("%s-POD_NUM.%s.%s.svc:3930", controller.TiFlashMemberName(clusterName), controller.TiFlashPeerMemberName(clusterName), ns)
+ }
+}
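+
+// setTiFlashCommonConfigDefault sets the defaults for the TiFlash common config (paths, ports, caches and nested sections).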
+func setTiFlashCommonConfigDefault(config *v1alpha1.CommonConfig, clusterName, ns string) {
+ if config.TmpPath == "" {
+ config.TmpPath = "/data0/tmp"
+ }
+ if config.DisplayName == "" {
+ config.DisplayName = "TiFlash"
+ }
+ if config.DefaultProfile == "" {
+ config.DefaultProfile = "default"
+ }
+ if config.Path == "" {
+ config.Path = "/data0/db"
+ }
+ if config.PathRealtimeMode == nil {
+ b := false
+ config.PathRealtimeMode = &b
+ }
+ if config.MarkCacheSize == nil {
+ var m int64 = 5368709120
+ config.MarkCacheSize = &m
+ }
+ if config.MinmaxIndexCacheSize == nil {
+ var m int64 = 5368709120
+ config.MinmaxIndexCacheSize = &m
+ }
+ if config.ListenHost == "" {
+ config.ListenHost = "0.0.0.0"
+ }
+ if config.TCPPort == nil {
+ var p int32 = 9000
+ config.TCPPort = &p
+ }
+ if config.HTTPPort == nil {
+ var p int32 = 8123
+ config.HTTPPort = &p
+ }
+ if config.InternalServerHTTPPort == nil {
+ var p int32 = 9009
+ config.InternalServerHTTPPort = &p
+ }
+ if config.Flash == nil {
+ config.Flash = &v1alpha1.Flash{}
+ }
+ setTiFlashFlashConfigDefault(config.Flash, clusterName, ns)
+ if config.FlashLogger == nil {
+ config.FlashLogger = &v1alpha1.FlashLogger{}
+ }
+ setTiFlashLoggerConfigDefault(config.FlashLogger)
+ if config.FlashApplication == nil {
+ config.FlashApplication = &v1alpha1.FlashApplication{}
+ }
+ setTiFlashApplicationConfigDefault(config.FlashApplication)
+ if config.FlashRaft == nil {
+ config.FlashRaft = &v1alpha1.FlashRaft{}
+ }
+ setTiFlashRaftConfigDefault(config.FlashRaft, clusterName, ns)
+ if config.FlashStatus == nil {
+ config.FlashStatus = &v1alpha1.FlashStatus{}
+ }
+ setTiFlashStatusConfigDefault(config.FlashStatus)
+ if config.FlashQuota == nil {
+ config.FlashQuota = &v1alpha1.FlashQuota{}
+ }
+ setTiFlashQuotasConfigDefault(config.FlashQuota)
+ if config.FlashUser == nil {
+ config.FlashUser = &v1alpha1.FlashUser{}
+ }
+ setTiFlashUsersConfigDefault(config.FlashUser)
+ if config.FlashProfile == nil {
+ config.FlashProfile = &v1alpha1.FlashProfile{}
+ }
+ setTiFlashProfilesConfigDefault(config.FlashProfile)
+}
+
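+// setTiFlashFlashConfigDefault sets the defaults for the flash section, including the TiDB status address, the service address and the cluster/proxy sub-sections.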
+func setTiFlashFlashConfigDefault(config *v1alpha1.Flash, clusterName, ns string) {
+ if config.TiDBStatusAddr == "" {
+ config.TiDBStatusAddr = fmt.Sprintf("%s.%s.svc:10080", controller.TiDBMemberName(clusterName), ns)
+ }
+ if config.ServiceAddr == "" {
+ config.ServiceAddr = fmt.Sprintf("%s-POD_NUM.%s.%s.svc:3930", controller.TiFlashMemberName(clusterName), controller.TiFlashPeerMemberName(clusterName), ns)
+ }
+ if config.OverlapThreshold == nil {
+ o := 0.6
+ config.OverlapThreshold = &o
+ }
+ if config.CompactLogMinPeriod == nil {
+ var o int32 = 200
+ config.CompactLogMinPeriod = &o
+ }
+ if config.FlashCluster == nil {
+ config.FlashCluster = &v1alpha1.FlashCluster{}
+ }
+ setTiFlashFlashClusterConfigDefault(config.FlashCluster)
+ if config.FlashProxy == nil {
+ config.FlashProxy = &v1alpha1.FlashProxy{}
+ }
+ setTiFlashFlashProxyConfigDefault(config.FlashProxy, clusterName, ns)
+}
+
+func setTiFlashFlashProxyConfigDefault(config *v1alpha1.FlashProxy, clusterName, ns string) {
+ if config.Addr == "" {
+ config.Addr = "0.0.0.0:20170"
+ }
+ if config.AdvertiseAddr == "" {
+ config.AdvertiseAddr = fmt.Sprintf("%s-POD_NUM.%s.%s.svc:20170", controller.TiFlashMemberName(clusterName), controller.TiFlashPeerMemberName(clusterName), ns)
+ }
+ if config.DataDir == "" {
+ config.DataDir = "/data0/proxy"
+ }
+ if config.Config == "" {
+ config.Config = "/data0/proxy.toml"
+ }
+ if config.LogFile == "" {
+ config.LogFile = defaultProxyLog
+ }
+}
+
+func setTiFlashFlashClusterConfigDefault(config *v1alpha1.FlashCluster) {
+ if config.ClusterManagerPath == "" {
+ config.ClusterManagerPath = "/tiflash/flash_cluster_manager"
+ }
+ if config.ClusterLog == "" {
+ config.ClusterLog = defaultClusterLog
+ }
+ if config.RefreshInterval == nil {
+ var r int32 = 20
+ config.RefreshInterval = &r
+ }
+ if config.UpdateRuleInterval == nil {
+ var r int32 = 10
+ config.UpdateRuleInterval = &r
+ }
+ if config.MasterTTL == nil {
+ var r int32 = 60
+ config.MasterTTL = &r
+ }
+}
+
+func setTiFlashLoggerConfigDefault(config *v1alpha1.FlashLogger) {
+ if config.ErrorLog == "" {
+ config.ErrorLog = defaultErrorLog
+ }
+ if config.Size == "" {
+ config.Size = "100M"
+ }
+ if config.ServerLog == "" {
+ config.ServerLog = defaultServerLog
+ }
+ if config.Level == "" {
+ config.Level = "information"
+ }
+ if config.Count == nil {
+ var c int32 = 10
+ config.Count = &c
+ }
+}
+
+func setTiFlashApplicationConfigDefault(config *v1alpha1.FlashApplication) {
+ if config.RunAsDaemon == nil {
+ r := true
+ config.RunAsDaemon = &r
+ }
+}
+
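+// setTiFlashRaftConfigDefault sets the default PD address, kvstore path and storage engine for the raft section.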
+func setTiFlashRaftConfigDefault(config *v1alpha1.FlashRaft, clusterName, ns string) {
+ if config.PDAddr == "" {
+ config.PDAddr = fmt.Sprintf("%s.%s.svc:2379", controller.PDMemberName(clusterName), ns)
+ }
+ if config.KVStorePath == "" {
+ config.KVStorePath = "/data0/kvstore"
+ }
+ if config.StorageEngine == "" {
+ config.StorageEngine = "dt"
+ }
+}
+
+func setTiFlashStatusConfigDefault(config *v1alpha1.FlashStatus) {
+ if config.MetricsPort == nil {
+ var d int32 = 8234
+ config.MetricsPort = &d
+ }
+}
+
+func setTiFlashQuotasConfigDefault(config *v1alpha1.FlashQuota) {
+ if config.Default == nil {
+ config.Default = &v1alpha1.Quota{}
+ }
+ if config.Default.Interval == nil {
+ config.Default.Interval = &v1alpha1.Interval{}
+ }
+ if config.Default.Interval.Duration == nil {
+ var d int32 = 3600
+ config.Default.Interval.Duration = &d
+ }
+ if config.Default.Interval.Queries == nil {
+ var d int32 = 0
+ config.Default.Interval.Queries = &d
+ }
+ if config.Default.Interval.Errors == nil {
+ var d int32 = 0
+ config.Default.Interval.Errors = &d
+ }
+ if config.Default.Interval.ResultRows == nil {
+ var d int32 = 0
+ config.Default.Interval.ResultRows = &d
+ }
+ if config.Default.Interval.ReadRows == nil {
+ var d int32 = 0
+ config.Default.Interval.ReadRows = &d
+ }
+ if config.Default.Interval.ExecutionTime == nil {
+ var d int32 = 0
+ config.Default.Interval.ExecutionTime = &d
+ }
+}
+
+func setTiFlashNetworksConfigDefault(config *v1alpha1.Networks) {
+ if config.IP == "" {
+ config.IP = "::/0"
+ }
+}
+
+func setTiFlashUsersConfigDefault(config *v1alpha1.FlashUser) {
+ if config.Readonly == nil {
+ config.Readonly = &v1alpha1.User{}
+ }
+ if config.Readonly.Profile == "" {
+ config.Readonly.Profile = "readonly"
+ }
+ if config.Readonly.Quota == "" {
+ config.Readonly.Quota = "default"
+ }
+ if config.Readonly.Networks == nil {
+ config.Readonly.Networks = &v1alpha1.Networks{}
+ }
+ setTiFlashNetworksConfigDefault(config.Readonly.Networks)
+
+ if config.Default == nil {
+ config.Default = &v1alpha1.User{}
+ }
+ if config.Default.Profile == "" {
+ config.Default.Profile = "default"
+ }
+ if config.Default.Quota == "" {
+ config.Default.Quota = "default"
+ }
+ if config.Default.Networks == nil {
+ config.Default.Networks = &v1alpha1.Networks{}
+ }
+ setTiFlashNetworksConfigDefault(config.Default.Networks)
+}
+
+func setTiFlashProfilesConfigDefault(config *v1alpha1.FlashProfile) {
+ if config.Readonly == nil {
+ config.Readonly = &v1alpha1.Profile{}
+ }
+ if config.Readonly.Readonly == nil {
+ var r int32 = 1
+ config.Readonly.Readonly = &r
+ }
+ if config.Default == nil {
+ config.Default = &v1alpha1.Profile{}
+ }
+ if config.Default.MaxMemoryUsage == nil {
+ var m int64 = 10000000000
+ config.Default.MaxMemoryUsage = &m
+ }
+ if config.Default.UseUncompressedCache == nil {
+ var u int32 = 0
+ config.Default.UseUncompressedCache = &u
+ }
+ if config.Default.LoadBalancing == nil {
+ l := "random"
+ config.Default.LoadBalancing = &l
+ }
+}
diff --git a/pkg/manager/member/tiflash_util_test.go b/pkg/manager/member/tiflash_util_test.go
new file mode 100644
index 0000000000..032259be96
--- /dev/null
+++ b/pkg/manager/member/tiflash_util_test.go
@@ -0,0 +1,594 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package member
+
+import (
+ "testing"
+
+ . "github.com/onsi/gomega"
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/pointer"
+)
+
+var (
+ defaultTiFlashConfig = v1alpha1.TiFlashConfig{
+ CommonConfig: &v1alpha1.CommonConfig{
+ FlashApplication: &v1alpha1.FlashApplication{
+ RunAsDaemon: pointer.BoolPtr(true),
+ },
+ DefaultProfile: "default",
+ DisplayName: "TiFlash",
+ Flash: &v1alpha1.Flash{
+ CompactLogMinPeriod: pointer.Int32Ptr(200),
+ FlashCluster: &v1alpha1.FlashCluster{
+ ClusterManagerPath: "/tiflash/flash_cluster_manager",
+ ClusterLog: "/data0/logs/flash_cluster_manager.log",
+ MasterTTL: pointer.Int32Ptr(60),
+ RefreshInterval: pointer.Int32Ptr(20),
+ UpdateRuleInterval: pointer.Int32Ptr(10),
+ },
+ OverlapThreshold: pointer.Float64Ptr(0.6),
+ FlashProxy: &v1alpha1.FlashProxy{
+ Addr: "0.0.0.0:20170",
+ AdvertiseAddr: "test-tiflash-POD_NUM.test-tiflash-peer.test.svc:20170",
+ Config: "/data0/proxy.toml",
+ DataDir: "/data0/proxy",
+ LogFile: "/data0/logs/proxy.log",
+ },
+ ServiceAddr: "test-tiflash-POD_NUM.test-tiflash-peer.test.svc:3930",
+ TiDBStatusAddr: "test-tidb.test.svc:10080",
+ },
+ HTTPPort: pointer.Int32Ptr(8123),
+ InternalServerHTTPPort: pointer.Int32Ptr(9009),
+ ListenHost: "0.0.0.0",
+ FlashLogger: &v1alpha1.FlashLogger{
+ Count: pointer.Int32Ptr(10),
+ ErrorLog: "/data0/logs/error.log",
+ Level: "information",
+ ServerLog: "/data0/logs/server.log",
+ Size: "100M",
+ },
+ MarkCacheSize: pointer.Int64Ptr(5368709120),
+ MinmaxIndexCacheSize: pointer.Int64Ptr(5368709120),
+ Path: "/data0/db",
+ PathRealtimeMode: pointer.BoolPtr(false),
+ FlashProfile: &v1alpha1.FlashProfile{
+ Default: &v1alpha1.Profile{
+ LoadBalancing: pointer.StringPtr("random"),
+ MaxMemoryUsage: pointer.Int64Ptr(10000000000),
+ UseUncompressedCache: pointer.Int32Ptr(0),
+ },
+ Readonly: &v1alpha1.Profile{
+ Readonly: pointer.Int32Ptr(1),
+ },
+ },
+ FlashQuota: &v1alpha1.FlashQuota{
+ Default: &v1alpha1.Quota{
+ Interval: &v1alpha1.Interval{
+ Duration: pointer.Int32Ptr(3600),
+ Errors: pointer.Int32Ptr(0),
+ ExecutionTime: pointer.Int32Ptr(0),
+ Queries: pointer.Int32Ptr(0),
+ ReadRows: pointer.Int32Ptr(0),
+ ResultRows: pointer.Int32Ptr(0),
+ },
+ },
+ },
+ FlashRaft: &v1alpha1.FlashRaft{
+ KVStorePath: "/data0/kvstore",
+ PDAddr: "test-pd.test.svc:2379",
+ StorageEngine: "dt",
+ },
+ FlashStatus: &v1alpha1.FlashStatus{
+ MetricsPort: pointer.Int32Ptr(8234),
+ },
+ TCPPort: pointer.Int32Ptr(9000),
+ TmpPath: "/data0/tmp",
+ FlashUser: &v1alpha1.FlashUser{
+ Default: &v1alpha1.User{
+ Networks: &v1alpha1.Networks{
+ IP: "::/0",
+ },
+ Profile: "default",
+ Quota: "default",
+ },
+ Readonly: &v1alpha1.User{
+ Networks: &v1alpha1.Networks{
+ IP: "::/0",
+ },
+ Profile: "readonly",
+ Quota: "default",
+ },
+ },
+ },
+ ProxyConfig: &v1alpha1.ProxyConfig{
+ LogLevel: "info",
+ Server: &v1alpha1.FlashServerConfig{
+ EngineAddr: "test-tiflash-POD_NUM.test-tiflash-peer.test.svc:3930",
+ },
+ },
+ }
+ customTiFlashConfig = v1alpha1.TiFlashConfig{
+ CommonConfig: &v1alpha1.CommonConfig{
+ FlashApplication: &v1alpha1.FlashApplication{
+ RunAsDaemon: pointer.BoolPtr(false),
+ },
+ DefaultProfile: "defaul",
+ DisplayName: "TiFlah",
+ Flash: &v1alpha1.Flash{
+ CompactLogMinPeriod: pointer.Int32Ptr(100),
+ FlashCluster: &v1alpha1.FlashCluster{
+ ClusterManagerPath: "/flash_cluster_manager",
+ ClusterLog: "/data1/logs/flash_cluster_manager.log",
+ MasterTTL: pointer.Int32Ptr(50),
+ RefreshInterval: pointer.Int32Ptr(21),
+ UpdateRuleInterval: pointer.Int32Ptr(11),
+ },
+ OverlapThreshold: pointer.Float64Ptr(0.7),
+ FlashProxy: &v1alpha1.FlashProxy{
+ Addr: "0.0.0.0:20171",
+ AdvertiseAddr: "test-tiflash-POD_NUM.test-tiflash-peer.test.svc:20171",
+ Config: "/data0/proxy1.toml",
+ DataDir: "/data0/proxy1",
+ LogFile: "/data0/logs/proxy1.log",
+ },
+ ServiceAddr: "test-tiflash-POD_NUM.test-tiflash-peer.test.svc:3931",
+ TiDBStatusAddr: "test-tidb.test.svc:10081",
+ },
+ HTTPPort: pointer.Int32Ptr(8121),
+ InternalServerHTTPPort: pointer.Int32Ptr(9001),
+ ListenHost: "0.0.0.1",
+ FlashLogger: &v1alpha1.FlashLogger{
+ Count: pointer.Int32Ptr(11),
+ ErrorLog: "/data1/logs/error1.log",
+ Level: "information1",
+ ServerLog: "/data0/logs/server1.log",
+ Size: "101M",
+ },
+ MarkCacheSize: pointer.Int64Ptr(5368709121),
+ MinmaxIndexCacheSize: pointer.Int64Ptr(5368709121),
+ Path: "/data1/db",
+ PathRealtimeMode: pointer.BoolPtr(true),
+ FlashProfile: &v1alpha1.FlashProfile{
+ Default: &v1alpha1.Profile{
+ LoadBalancing: pointer.StringPtr("random1"),
+ MaxMemoryUsage: pointer.Int64Ptr(10000000001),
+ UseUncompressedCache: pointer.Int32Ptr(1),
+ },
+ Readonly: &v1alpha1.Profile{
+ Readonly: pointer.Int32Ptr(0),
+ },
+ },
+ FlashQuota: &v1alpha1.FlashQuota{
+ Default: &v1alpha1.Quota{
+ Interval: &v1alpha1.Interval{
+ Duration: pointer.Int32Ptr(3601),
+ Errors: pointer.Int32Ptr(1),
+ ExecutionTime: pointer.Int32Ptr(1),
+ Queries: pointer.Int32Ptr(1),
+ ReadRows: pointer.Int32Ptr(1),
+ ResultRows: pointer.Int32Ptr(1),
+ },
+ },
+ },
+ FlashRaft: &v1alpha1.FlashRaft{
+ KVStorePath: "/data1/kvstore",
+ PDAddr: "test-pd.test.svc:2379",
+ StorageEngine: "dt",
+ },
+ FlashStatus: &v1alpha1.FlashStatus{
+ MetricsPort: pointer.Int32Ptr(8235),
+ },
+ TCPPort: pointer.Int32Ptr(9001),
+ TmpPath: "/data1/tmp",
+ FlashUser: &v1alpha1.FlashUser{
+ Default: &v1alpha1.User{
+ Networks: &v1alpha1.Networks{
+ IP: "::/1",
+ },
+ Profile: "default1",
+ Quota: "default1",
+ },
+ Readonly: &v1alpha1.User{
+ Networks: &v1alpha1.Networks{
+ IP: "::/1",
+ },
+ Profile: "readonly1",
+ Quota: "default1",
+ },
+ },
+ },
+ ProxyConfig: &v1alpha1.ProxyConfig{
+ LogLevel: "info1",
+ Server: &v1alpha1.FlashServerConfig{
+ EngineAddr: "test-tiflash-POD_NUM.test-tiflash-peer.test.svc:3930",
+ },
+ },
+ }
+ defaultTiFlashLogConfig = v1alpha1.TiFlashConfig{
+ CommonConfig: &v1alpha1.CommonConfig{
+ Flash: &v1alpha1.Flash{
+ FlashCluster: &v1alpha1.FlashCluster{
+ ClusterLog: "/data0/logs/flash_cluster_manager.log",
+ },
+ FlashProxy: &v1alpha1.FlashProxy{
+ LogFile: "/data0/logs/proxy.log",
+ },
+ },
+ FlashLogger: &v1alpha1.FlashLogger{
+ ErrorLog: "/data0/logs/error.log",
+ ServerLog: "/data0/logs/server.log",
+ },
+ },
+ }
+ customTiFlashLogConfig = v1alpha1.TiFlashConfig{
+ CommonConfig: &v1alpha1.CommonConfig{
+ Flash: &v1alpha1.Flash{
+ FlashCluster: &v1alpha1.FlashCluster{
+ ClusterLog: "/data1/logs/flash_cluster_manager.log",
+ },
+ FlashProxy: &v1alpha1.FlashProxy{
+ LogFile: "/data1/logs/proxy.log",
+ },
+ },
+ FlashLogger: &v1alpha1.FlashLogger{
+ ErrorLog: "/data1/logs/error.log",
+ ServerLog: "/data1/logs/server.log",
+ },
+ },
+ }
+ defaultSideCarContainers = []corev1.Container{
+ {
+ Name: "serverlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{},
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data0/logs/server.log; tail -n0 -F /data0/logs/server.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data0", MountPath: "/data0"},
+ },
+ },
+ {
+ Name: "errorlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{},
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data0/logs/error.log; tail -n0 -F /data0/logs/error.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data0", MountPath: "/data0"},
+ },
+ },
+ {
+ Name: "proxylog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{},
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data0/logs/proxy.log; tail -n0 -F /data0/logs/proxy.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data0", MountPath: "/data0"},
+ },
+ },
+ {
+ Name: "clusterlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{},
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data0/logs/flash_cluster_manager.log; tail -n0 -F /data0/logs/flash_cluster_manager.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data0", MountPath: "/data0"},
+ },
+ },
+ }
+ customSideCarContainers = []corev1.Container{
+ {
+ Name: "serverlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{},
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data1/logs/server.log; tail -n0 -F /data1/logs/server.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data1", MountPath: "/data1"},
+ },
+ },
+ {
+ Name: "errorlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{},
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data1/logs/error.log; tail -n0 -F /data1/logs/error.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data1", MountPath: "/data1"},
+ },
+ },
+ {
+ Name: "proxylog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{},
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data1/logs/proxy.log; tail -n0 -F /data1/logs/proxy.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data1", MountPath: "/data1"},
+ },
+ },
+ {
+ Name: "clusterlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{},
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data1/logs/flash_cluster_manager.log; tail -n0 -F /data1/logs/flash_cluster_manager.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data1", MountPath: "/data1"},
+ },
+ },
+ }
+ customResourceSideCarContainers = []corev1.Container{
+ {
+ Name: "serverlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ corev1.ResourceMemory: resource.MustParse("2Gi"),
+ },
+ },
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data1/logs/server.log; tail -n0 -F /data1/logs/server.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data1", MountPath: "/data1"},
+ },
+ },
+ {
+ Name: "errorlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ corev1.ResourceMemory: resource.MustParse("2Gi"),
+ },
+ },
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data1/logs/error.log; tail -n0 -F /data1/logs/error.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data1", MountPath: "/data1"},
+ },
+ },
+ {
+ Name: "proxylog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ corev1.ResourceMemory: resource.MustParse("2Gi"),
+ },
+ },
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data1/logs/proxy.log; tail -n0 -F /data1/logs/proxy.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data1", MountPath: "/data1"},
+ },
+ },
+ {
+ Name: "clusterlog",
+ Image: "busybox:1.26.2",
+ ImagePullPolicy: "",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ corev1.ResourceMemory: resource.MustParse("2Gi"),
+ },
+ },
+ Command: []string{
+ "sh",
+ "-c",
+ "touch /data1/logs/flash_cluster_manager.log; tail -n0 -F /data1/logs/flash_cluster_manager.log;",
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "data1", MountPath: "/data1"},
+ },
+ },
+ }
+)
+
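+// newTidbCluster returns a minimal TidbCluster fixture with an empty TiFlash spec for the tests in this file.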
+func newTidbCluster() *v1alpha1.TidbCluster {
+ return &v1alpha1.TidbCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "TidbCluster",
+ APIVersion: "pingcap.com/v1alpha1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-pd",
+ Namespace: corev1.NamespaceDefault,
+ UID: types.UID("test"),
+ },
+ Spec: v1alpha1.TidbClusterSpec{
+ PD: v1alpha1.PDSpec{
+ ComponentSpec: v1alpha1.ComponentSpec{
+ Image: "pd-test-image",
+ },
+ },
+ TiKV: v1alpha1.TiKVSpec{
+ ComponentSpec: v1alpha1.ComponentSpec{
+ Image: "tikv-test-image",
+ },
+ },
+ TiDB: v1alpha1.TiDBSpec{
+ ComponentSpec: v1alpha1.ComponentSpec{
+ Image: "tidb-test-image",
+ },
+ },
+ TiFlash: &v1alpha1.TiFlashSpec{},
+ },
+ }
+}
+
+func TestBuildTiFlashSidecarContainers(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ type testcase struct {
+ name string
+ flashConfig *v1alpha1.TiFlashConfig
+ expect []corev1.Container
+ resource bool
+ }
+
+ tests := []*testcase{
+ {
+ name: "nil config",
+ flashConfig: nil,
+ expect: defaultSideCarContainers,
+ },
+ {
+ name: "empty config",
+ flashConfig: &v1alpha1.TiFlashConfig{},
+ expect: defaultSideCarContainers,
+ },
+ {
+ name: "custom config",
+ flashConfig: &customTiFlashLogConfig,
+ expect: customSideCarContainers,
+ },
+ {
+ name: "custom resource config",
+ flashConfig: &customTiFlashLogConfig,
+ expect: customResourceSideCarContainers,
+ resource: true,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ tc := newTidbCluster()
+ tc.Spec.TiFlash.Config = test.flashConfig
+ if test.resource {
+ tc.Spec.TiFlash.LogTailer = &v1alpha1.LogTailerSpec{}
+ tc.Spec.TiFlash.LogTailer.ResourceRequirements = corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ corev1.ResourceMemory: resource.MustParse("2Gi"),
+ corev1.ResourceStorage: resource.MustParse("100Gi"),
+ },
+ }
+ }
+ cs := buildTiFlashSidecarContainers(tc)
+ g.Expect(cs).To(Equal(test.expect))
+ })
+ }
+}
+func TestSetTiFlashConfigDefault(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ type testcase struct {
+ name string
+ config v1alpha1.TiFlashConfig
+ expect v1alpha1.TiFlashConfig
+ }
+
+ tests := []*testcase{
+ {
+ name: "nil config",
+ config: v1alpha1.TiFlashConfig{},
+ expect: defaultTiFlashConfig,
+ },
+ {
+ name: "custom config",
+ config: customTiFlashConfig,
+ expect: customTiFlashConfig,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ setTiFlashConfigDefault(&test.config, "test", "test")
+ g.Expect(test.config).To(Equal(test.expect))
+ })
+ }
+}
+
+func TestSetTiFlashLogConfigDefault(t *testing.T) {
+ g := NewGomegaWithT(t)
+
+ type testcase struct {
+ name string
+ config v1alpha1.TiFlashConfig
+ expect v1alpha1.TiFlashConfig
+ }
+
+ tests := []*testcase{
+ {
+ name: "nil config",
+ config: v1alpha1.TiFlashConfig{},
+ expect: defaultTiFlashLogConfig,
+ },
+ {
+ name: "custom config",
+ config: customTiFlashLogConfig,
+ expect: customTiFlashLogConfig,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ setTiFlashLogConfigDefault(&test.config)
+ g.Expect(test.config).To(Equal(test.expect))
+ })
+ }
+}
diff --git a/pkg/manager/member/tikv_failover.go b/pkg/manager/member/tikv_failover.go
index acf16c4952..036659a471 100644
--- a/pkg/manager/member/tikv_failover.go
+++ b/pkg/manager/member/tikv_failover.go
@@ -14,20 +14,24 @@
package member
import (
+ "fmt"
"time"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- glog "k8s.io/klog"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/klog"
)
type tikvFailover struct {
tikvFailoverPeriod time.Duration
+ recorder record.EventRecorder
}
// NewTiKVFailover returns a tikv Failover
-func NewTiKVFailover(tikvFailoverPeriod time.Duration) Failover {
- return &tikvFailover{tikvFailoverPeriod}
+func NewTiKVFailover(tikvFailoverPeriod time.Duration, recorder record.EventRecorder) Failover {
+ return &tikvFailover{tikvFailoverPeriod, recorder}
}
func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error {
@@ -51,18 +55,19 @@ func (tf *tikvFailover) Failover(tc *v1alpha1.TidbCluster) error {
if tc.Status.TiKV.FailureStores == nil {
tc.Status.TiKV.FailureStores = map[string]v1alpha1.TiKVFailureStore{}
}
- if tc.Spec.TiKV.MaxFailoverCount != nil {
+ if tc.Spec.TiKV.MaxFailoverCount != nil && *tc.Spec.TiKV.MaxFailoverCount > 0 {
maxFailoverCount := *tc.Spec.TiKV.MaxFailoverCount
- if maxFailoverCount > 0 && len(tc.Status.TiKV.FailureStores) >= int(maxFailoverCount) {
- glog.Warningf("%s/%s failure stores count reached the limit: %d", ns, tcName, tc.Spec.TiKV.MaxFailoverCount)
+ if len(tc.Status.TiKV.FailureStores) >= int(maxFailoverCount) {
+ klog.Warningf("%s/%s failure stores count reached the limit: %d", ns, tcName, tc.Spec.TiKV.MaxFailoverCount)
return nil
}
- }
-
- tc.Status.TiKV.FailureStores[storeID] = v1alpha1.TiKVFailureStore{
- PodName: podName,
- StoreID: store.ID,
- CreatedAt: metav1.Now(),
+ tc.Status.TiKV.FailureStores[storeID] = v1alpha1.TiKVFailureStore{
+ PodName: podName,
+ StoreID: store.ID,
+ CreatedAt: metav1.Now(),
+ }
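+ // record a warning event so that users can see the down store on the TidbCluster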
+ msg := fmt.Sprintf("store[%s] is Down", store.ID)
+ tf.recorder.Event(tc, corev1.EventTypeWarning, unHealthEventReason, fmt.Sprintf(unHealthEventMsgPattern, "tikv", podName, msg))
}
}
}
diff --git a/pkg/manager/member/tikv_failover_test.go b/pkg/manager/member/tikv_failover_test.go
index 6d4cb1387e..477d698a97 100644
--- a/pkg/manager/member/tikv_failover_test.go
+++ b/pkg/manager/member/tikv_failover_test.go
@@ -20,6 +20,7 @@ import (
. "github.com/onsi/gomega"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"
)
@@ -294,7 +295,7 @@ func TestTiKVFailoverFailover(t *testing.T) {
err: false,
expectFn: func(tc *v1alpha1.TidbCluster) {
g.Expect(int(tc.Spec.TiKV.Replicas)).To(Equal(3))
- g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(6))
+ g.Expect(len(tc.Status.TiKV.FailureStores)).To(Equal(3))
},
},
}
@@ -304,5 +305,6 @@ func TestTiKVFailoverFailover(t *testing.T) {
}
func newFakeTiKVFailover() *tikvFailover {
- return &tikvFailover{1 * time.Hour}
+ recorder := record.NewFakeRecorder(100)
+ return &tikvFailover{1 * time.Hour, recorder}
}
diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go
index 14e6beda82..76b0c5ec30 100644
--- a/pkg/manager/member/tikv_member_manager.go
+++ b/pkg/manager/member/tikv_member_manager.go
@@ -15,6 +15,7 @@ package member
import (
"fmt"
+ "path"
"reflect"
"regexp"
"strings"
@@ -25,6 +26,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/manager"
"github.com/pingcap/tidb-operator/pkg/pdapi"
+ "github.com/pingcap/tidb-operator/pkg/util"
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -33,7 +35,15 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
v1 "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
+)
+
+const (
+ // tikvClusterCertPath is where the cert for inter-cluster communication is stored (if any)
+ tikvClusterCertPath = "/var/lib/tikv-tls"
+
+ // TODO: find a better way to match only the stores that are managed by this Operator's TiKV members
+ tikvStoreLimitPattern = `%s-tikv-\d+\.%s-tikv-peer\.%s\.svc\:\d+`
)
// tikvMemberManager implements manager.Manager.
@@ -41,7 +51,6 @@ type tikvMemberManager struct {
setControl controller.StatefulSetControlInterface
svcControl controller.ServiceControlInterface
pdControl pdapi.PDControlInterface
- certControl controller.CertControlInterface
typedControl controller.TypedControlInterface
setLister v1.StatefulSetLister
svcLister corelisters.ServiceLister
@@ -55,10 +64,10 @@ type tikvMemberManager struct {
}
// NewTiKVMemberManager returns a *tikvMemberManager
-func NewTiKVMemberManager(pdControl pdapi.PDControlInterface,
+func NewTiKVMemberManager(
+ pdControl pdapi.PDControlInterface,
setControl controller.StatefulSetControlInterface,
svcControl controller.ServiceControlInterface,
- certControl controller.CertControlInterface,
typedControl controller.TypedControlInterface,
setLister v1.StatefulSetLister,
svcLister corelisters.ServiceLister,
@@ -74,7 +83,6 @@ func NewTiKVMemberManager(pdControl pdapi.PDControlInterface,
nodeLister: nodeLister,
setControl: setControl,
svcControl: svcControl,
- certControl: certControl,
typedControl: typedControl,
setLister: setLister,
svcLister: svcLister,
@@ -123,6 +131,11 @@ func (tkmm *tikvMemberManager) Sync(tc *v1alpha1.TidbCluster) error {
}
func (tkmm *tikvMemberManager) syncServiceForTidbCluster(tc *v1alpha1.TidbCluster, svcConfig SvcConfig) error {
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tikv cluster %s/%s is paused, skip syncing for tikv service", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
ns := tc.GetNamespace()
tcName := tc.GetName()
@@ -172,6 +185,16 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl
setNotExist := errors.IsNotFound(err)
oldSet := oldSetTmp.DeepCopy()
+
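+ // sync the TiKV status first so it stays up to date even when syncing is paused below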
+ if err := tkmm.syncTidbClusterStatus(tc, oldSet); err != nil {
+ return err
+ }
+
+ if tc.Spec.Paused {
+ klog.V(4).Infof("tikv cluster %s/%s is paused, skip syncing for tikv statefulset", tc.GetNamespace(), tc.GetName())
+ return nil
+ }
+
cm, err := tkmm.syncTiKVConfigMap(tc, oldSet)
if err != nil {
return err
@@ -186,12 +209,6 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl
if err != nil {
return err
}
- if tc.IsTLSClusterEnabled() {
- err := tkmm.syncTiKVServerCerts(tc)
- if err != nil {
- return err
- }
- }
err = tkmm.setControl.CreateStatefulSet(tc, newSet)
if err != nil {
return err
@@ -200,10 +217,6 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl
return nil
}
- if err := tkmm.syncTidbClusterStatus(tc, oldSet); err != nil {
- return err
- }
-
if _, err := tkmm.setStoreLabelsForTiKV(tc); err != nil {
return err
}
@@ -218,7 +231,7 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl
return err
}
- if tkmm.autoFailover {
+ if tkmm.autoFailover && tc.Spec.TiKV.MaxFailoverCount != nil {
if tc.TiKVAllPodsStarted() && !tc.TiKVAllStoresReady() {
if err := tkmm.tikvFailover.Failover(tc); err != nil {
return err
@@ -229,34 +242,6 @@ func (tkmm *tikvMemberManager) syncStatefulSetForTidbCluster(tc *v1alpha1.TidbCl
return updateStatefulSet(tkmm.setControl, tc, newSet, oldSet)
}
-func (tkmm *tikvMemberManager) syncTiKVServerCerts(tc *v1alpha1.TidbCluster) error {
- ns := tc.GetNamespace()
- tcName := tc.GetName()
- svcName := controller.TiKVMemberName(tcName)
- peerName := controller.TiKVPeerMemberName(tcName)
-
- if tkmm.certControl.CheckSecret(ns, svcName) {
- return nil
- }
-
- hostList := []string{
- peerName,
- fmt.Sprintf("%s.%s", peerName, ns),
- fmt.Sprintf("*.%s.%s.svc", peerName, ns),
- }
-
- certOpts := &controller.TiDBClusterCertOptions{
- Namespace: ns,
- Instance: tcName,
- CommonName: svcName,
- HostList: hostList,
- Component: "tikv",
- Suffix: "tikv",
- }
-
- return tkmm.certControl.Create(controller.GetOwnerRef(tc), certOpts)
-}
-
func (tkmm *tikvMemberManager) syncTiKVConfigMap(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) (*corev1.ConfigMap, error) {
// For backward compatibility, only sync tidb configmap when .tikv.config is non-nil
if tc.Spec.TiKV.Config == nil {
@@ -359,7 +344,7 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
vols = append(vols, corev1.Volume{
Name: "tikv-tls", VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
- SecretName: controller.TiKVMemberName(tcName),
+ SecretName: util.ClusterTLSSecretName(tc.Name, label.TiKVLabelVal),
},
},
})
@@ -442,7 +427,7 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
ImagePullPolicy: baseTiKVSpec.ImagePullPolicy(),
Command: []string{"/bin/sh", "/usr/local/bin/tikv_start_script.sh"},
SecurityContext: &corev1.SecurityContext{
- Privileged: tc.Spec.TiKV.Privileged,
+ Privileged: tc.TiKVContainerPrivilege(),
},
Ports: []corev1.ContainerPort{
{
@@ -466,11 +451,12 @@ func getNewTiKVSetForTidbCluster(tc *v1alpha1.TidbCluster, cm *corev1.ConfigMap)
},
})
}
- tikvContainer.Env = env
+ tikvContainer.Env = util.AppendEnv(env, baseTiKVSpec.Env())
podSpec.Volumes = vols
podSpec.SecurityContext = podSecurityContext
podSpec.InitContainers = initContainers
podSpec.Containers = []corev1.Container{tikvContainer}
+ podSpec.ServiceAccountName = tc.Spec.TiKV.ServiceAccount
tikvset := &apps.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
@@ -525,6 +511,17 @@ func getTikVConfigMap(tc *v1alpha1.TidbCluster) (*corev1.ConfigMap, error) {
if config == nil {
return nil, nil
}
+
+ // override the TLS cert paths if cluster TLS is enabled
+ if tc.IsTLSClusterEnabled() {
+ if config.Security == nil {
+ config.Security = &v1alpha1.TiKVSecurityConfig{}
+ }
+ config.Security.CAPath = path.Join(tikvClusterCertPath, tlsSecretRootCAKey)
+ config.Security.CertPath = path.Join(tikvClusterCertPath, corev1.TLSCertKey)
+ config.Security.KeyPath = path.Join(tikvClusterCertPath, corev1.TLSPrivateKeyKey)
+ }
+
confText, err := MarshalTOML(config)
if err != nil {
return nil, err
@@ -565,6 +562,10 @@ func labelTiKV(tc *v1alpha1.TidbCluster) label.Label {
}
func (tkmm *tikvMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set *apps.StatefulSet) error {
+ if set == nil {
+ // skip syncing status if the statefulset has not been created yet
+ return nil
+ }
tc.Status.TiKV.StatefulSet = &set.Status
upgrading, err := tkmm.tikvStatefulSetIsUpgradingFn(tkmm.podLister, tkmm.pdControl, set, tc)
if err != nil {
@@ -588,7 +589,7 @@ func (tkmm *tikvMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s
return err
}
- pattern, err := regexp.Compile(fmt.Sprintf(`%s-tikv-\d+\.%s-tikv-peer\.%s\.svc\:\d+`, tc.Name, tc.Name, tc.Namespace))
+ pattern, err := regexp.Compile(fmt.Sprintf(tikvStoreLimitPattern, tc.Name, tc.Name, tc.Namespace))
if err != nil {
return err
}
@@ -605,7 +606,7 @@ func (tkmm *tikvMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s
// avoid LastHeartbeatTime be overwrite by zero time when pd lost LastHeartbeatTime
if status.LastHeartbeatTime.IsZero() {
if oldStatus, ok := previousStores[status.ID]; ok {
- glog.V(4).Infof("the pod:%s's store LastHeartbeatTime is zero,so will keep in %v", status.PodName, oldStatus.LastHeartbeatTime)
+ klog.V(4).Infof("the pod:%s's store LastHeartbeatTime is zero,so will keep in %v", status.PodName, oldStatus.LastHeartbeatTime)
status.LastHeartbeatTime = oldStatus.LastHeartbeatTime
}
}
@@ -637,6 +638,11 @@ func (tkmm *tikvMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s
tc.Status.TiKV.Synced = true
tc.Status.TiKV.Stores = stores
tc.Status.TiKV.TombstoneStores = tombstoneStores
+ tc.Status.TiKV.Image = ""
+ c := filterContainer(set, "tikv")
+ if c != nil {
+ tc.Status.TiKV.Image = c.Image
+ }
return nil
}
@@ -679,7 +685,16 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (
return setCount, nil
}
+ pattern, err := regexp.Compile(fmt.Sprintf(tikvStoreLimitPattern, tc.Name, tc.Name, tc.Namespace))
+ if err != nil {
+ return -1, err
+ }
for _, store := range storesInfo.Stores {
+ // In theory, an external tikv can join the cluster, and the operator should only manage the internal tikv stores.
+ // So we check the store address to make sure the store belongs to this cluster.
+ if store.Store != nil && !pattern.Match([]byte(store.Store.Address)) {
+ continue
+ }
status := tkmm.getTiKVStore(store)
if status == nil {
continue
@@ -694,19 +709,19 @@ func (tkmm *tikvMemberManager) setStoreLabelsForTiKV(tc *v1alpha1.TidbCluster) (
nodeName := pod.Spec.NodeName
ls, err := tkmm.getNodeLabels(nodeName, locationLabels)
if err != nil || len(ls) == 0 {
- glog.Warningf("node: [%s] has no node labels, skipping set store labels for Pod: [%s/%s]", nodeName, ns, podName)
+ klog.Warningf("node: [%s] has no node labels, skipping set store labels for Pod: [%s/%s]", nodeName, ns, podName)
continue
}
if !tkmm.storeLabelsEqualNodeLabels(store.Store.Labels, ls) {
set, err := pdCli.SetStoreLabels(store.Store.Id, ls)
if err != nil {
- glog.Warningf("failed to set pod: [%s/%s]'s store labels: %v", ns, podName, ls)
+ klog.Warningf("failed to set pod: [%s/%s]'s store labels: %v", ns, podName, ls)
continue
}
if set {
setCount++
- glog.Infof("pod: [%s/%s] set labels: %v successfully", ns, podName, ls)
+ klog.Infof("pod: [%s/%s] set labels: %v successfully", ns, podName, ls)
}
}
}
diff --git a/pkg/manager/member/tikv_member_manager_test.go b/pkg/manager/member/tikv_member_manager_test.go
index 75470093ff..773c778ce9 100644
--- a/pkg/manager/member/tikv_member_manager_test.go
+++ b/pkg/manager/member/tikv_member_manager_test.go
@@ -79,7 +79,7 @@ func TestTiKVMemberManagerSyncCreate(t *testing.T) {
pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
return &v1alpha1.PDConfig{
Replication: &v1alpha1.PDReplicationConfig{
- LocationLabels: v1alpha1.StringSlice{"region", "zone", "rack", "host"},
+ LocationLabels: []string{"region", "zone", "rack", "host"},
},
}, nil
})
@@ -219,9 +219,9 @@ func TestTiKVMemberManagerSyncUpdate(t *testing.T) {
tkmm, fakeSetControl, fakeSvcControl, pdClient, _, _ := newFakeTiKVMemberManager(tc)
pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
- return &v1alpha1.PDConfig{
- Replication: &v1alpha1.PDReplicationConfig{
- LocationLabels: v1alpha1.StringSlice{"region", "zone", "rack", "host"},
+ return &pdapi.PDConfigFromAPI{
+ Replication: &pdapi.PDReplicationConfig{
+ LocationLabels: []string{"region", "zone", "rack", "host"},
},
}, nil
})
@@ -481,9 +481,9 @@ func TestTiKVMemberManagerSetStoreLabelsForTiKV(t *testing.T) {
tc := newTidbClusterForPD()
pmm, _, _, pdClient, podIndexer, nodeIndexer := newFakeTiKVMemberManager(tc)
pdClient.AddReaction(pdapi.GetConfigActionType, func(action *pdapi.Action) (interface{}, error) {
- return &v1alpha1.PDConfig{
- Replication: &v1alpha1.PDReplicationConfig{
- LocationLabels: v1alpha1.StringSlice{"region", "zone", "rack", "host"},
+ return &pdapi.PDConfigFromAPI{
+ Replication: &pdapi.PDReplicationConfig{
+ LocationLabels: []string{"region", "zone", "rack", "host"},
},
}, nil
})
diff --git a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go
index 8d56cd0a73..5522dc68bc 100644
--- a/pkg/manager/member/tikv_scaler.go
+++ b/pkg/manager/member/tikv_scaler.go
@@ -18,13 +18,15 @@ import (
"strconv"
"time"
+ "github.com/pingcap/advanced-statefulset/pkg/apis/apps/v1/helper"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/pdapi"
+ "github.com/pingcap/tidb-operator/pkg/util"
apps "k8s.io/api/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
@@ -48,7 +50,8 @@ func (tsd *tikvScaler) Scale(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet,
} else if scaling < 0 {
return tsd.ScaleIn(tc, oldSet, newSet)
}
- return nil
+ // we only sync the auto-scaler annotations after scaling has been handled
+ return tsd.SyncAutoScalerAnn(tc, oldSet)
}
func (tsd *tikvScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
@@ -58,7 +61,7 @@ func (tsd *tikvScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulS
return nil
}
- glog.Infof("scaling out tikv statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
+ klog.Infof("scaling out tikv statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
_, err := tsd.deleteDeferDeletingPVC(tc, oldSet.GetName(), v1alpha1.TiKVMemberType, ordinal)
if err != nil {
return err
@@ -78,12 +81,12 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
// tikv can not scale in when it is upgrading
if tc.TiKVUpgrading() {
- glog.Infof("the TidbCluster: [%s/%s]'s tikv is upgrading,can not scale in until upgrade have completed",
+ klog.Infof("the TidbCluster: [%s/%s]'s tikv is upgrading,can not scale in until upgrade have completed",
ns, tcName)
return nil
}
- glog.Infof("scaling in tikv statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
+ klog.Infof("scaling in tikv statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
// We need remove member from cluster before reducing statefulset replicas
podName := ordinalPodName(v1alpha1.TiKVMemberType, tcName, ordinal)
pod, err := tsd.podLister.Pods(ns).Get(podName)
@@ -105,10 +108,10 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
}
if state != v1alpha1.TiKVStateOffline {
if err := controller.GetPDClient(tsd.pdControl, tc).DeleteStore(id); err != nil {
- glog.Errorf("tikv scale in: failed to delete store %d, %v", id, err)
+ klog.Errorf("tikv scale in: failed to delete store %d, %v", id, err)
return err
}
- glog.Infof("tikv scale in: delete store %d for tikv %s/%s successfully", id, ns, podName)
+ klog.Infof("tikv scale in: delete store %d for tikv %s/%s successfully", id, ns, podName)
}
return controller.RequeueErrorf("TiKV %s/%s store %d still in cluster, state: %s", ns, podName, id, state)
}
@@ -121,7 +124,7 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
}
// TODO: double check if store is really not in Up/Offline/Down state
- glog.Infof("TiKV %s/%s store %d becomes tombstone", ns, podName, id)
+ klog.Infof("TiKV %s/%s store %d becomes tombstone", ns, podName, id)
pvcName := ordinalPVCName(v1alpha1.TiKVMemberType, setName, ordinal)
pvc, err := tsd.pvcLister.PersistentVolumeClaims(ns).Get(pvcName)
@@ -135,11 +138,11 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
pvc.Annotations[label.AnnPVCDeferDeleting] = now
_, err = tsd.pvcControl.UpdatePVC(tc, pvc)
if err != nil {
- glog.Errorf("tikv scale in: failed to set pvc %s/%s annotation: %s to %s",
+ klog.Errorf("tikv scale in: failed to set pvc %s/%s annotation: %s to %s",
ns, pvcName, label.AnnPVCDeferDeleting, now)
return err
}
- glog.Infof("tikv scale in: set pvc %s/%s annotation: %s to %s",
+ klog.Infof("tikv scale in: set pvc %s/%s annotation: %s to %s",
ns, pvcName, label.AnnPVCDeferDeleting, now)
setReplicasAndDeleteSlots(newSet, replicas, deleteSlots)
@@ -178,11 +181,11 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
pvc.Annotations[label.AnnPVCDeferDeleting] = now
_, err = tsd.pvcControl.UpdatePVC(tc, pvc)
if err != nil {
- glog.Errorf("pod %s not ready, tikv scale in: failed to set pvc %s/%s annotation: %s to %s",
+ klog.Errorf("pod %s not ready, tikv scale in: failed to set pvc %s/%s annotation: %s to %s",
podName, ns, pvcName, label.AnnPVCDeferDeleting, now)
return err
}
- glog.Infof("pod %s not ready, tikv scale in: set pvc %s/%s annotation: %s to %s",
+ klog.Infof("pod %s not ready, tikv scale in: set pvc %s/%s annotation: %s to %s",
podName, ns, pvcName, label.AnnPVCDeferDeleting, now)
setReplicasAndDeleteSlots(newSet, replicas, deleteSlots)
return nil
@@ -190,6 +193,32 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
return fmt.Errorf("TiKV %s/%s not found in cluster", ns, podName)
}
+// SyncAutoScalerAnn reclaims the auto-scaling-out slots if the target pods no longer exist
+func (tsd *tikvScaler) SyncAutoScalerAnn(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet) error {
+ currentScalingSlots := util.GetAutoScalingOutSlots(tc, v1alpha1.TiKVMemberType)
+ if currentScalingSlots.Len() < 1 {
+ return nil
+ }
+ currentOrdinals := helper.GetPodOrdinals(tc.Spec.TiKV.Replicas, actual)
+
+ // reclaim the auto-scaling-out slots if the target pods no longer exist
+ if !currentOrdinals.HasAll(currentScalingSlots.List()...) {
+ reclaimedSlots := currentScalingSlots.Difference(currentOrdinals)
+ currentScalingSlots = currentScalingSlots.Delete(reclaimedSlots.List()...)
+ if currentScalingSlots.Len() < 1 {
+ delete(tc.Annotations, label.AnnTiKVAutoScalingOutOrdinals)
+ return nil
+ }
+ v, err := util.Encode(currentScalingSlots.List())
+ if err != nil {
+ return err
+ }
+ tc.Annotations[label.AnnTiKVAutoScalingOutOrdinals] = v
+ return nil
+ }
+ return nil
+}
+
type fakeTiKVScaler struct{}
// NewFakeTiKVScaler returns a fake tikv Scaler
@@ -215,3 +244,7 @@ func (fsd *fakeTiKVScaler) ScaleIn(_ *v1alpha1.TidbCluster, oldSet *apps.Statefu
setReplicasAndDeleteSlots(newSet, *oldSet.Spec.Replicas-1, nil)
return nil
}
+
+func (fsd *fakeTiKVScaler) SyncAutoScalerAnn(tc *v1alpha1.TidbCluster, actual *apps.StatefulSet) error {
+ return nil
+}
diff --git a/pkg/manager/member/tikv_upgrader.go b/pkg/manager/member/tikv_upgrader.go
index 7b0e0d9303..3d03b507c5 100644
--- a/pkg/manager/member/tikv_upgrader.go
+++ b/pkg/manager/member/tikv_upgrader.go
@@ -25,7 +25,7 @@ import (
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -83,12 +83,13 @@ func (tku *tikvUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Stateful
// If we encounter this situation, we will let the native statefulset controller do the upgrade completely, which may be unsafe for upgrading tikv.
// Therefore, in the production environment, we should try to avoid modifying the tikv statefulset update strategy directly.
newSet.Spec.UpdateStrategy = oldSet.Spec.UpdateStrategy
- glog.Warningf("tidbcluster: [%s/%s] tikv statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
+ klog.Warningf("tidbcluster: [%s/%s] tikv statefulset %s UpdateStrategy has been modified manually", ns, tcName, oldSet.GetName())
return nil
}
if controller.PodWebhookEnabled {
setUpgradePartition(newSet, 0)
+ return nil
}
setUpgradePartition(newSet, *oldSet.Spec.UpdateStrategy.RollingUpdate.Partition)
@@ -170,7 +171,7 @@ func (tku *tikvUpgrader) readyToUpgrade(upgradePod *corev1.Pod, store v1alpha1.T
if evictLeaderBeginTimeStr, evicting := upgradePod.Annotations[EvictLeaderBeginTime]; evicting {
evictLeaderBeginTime, err := time.Parse(time.RFC3339, evictLeaderBeginTimeStr)
if err != nil {
- glog.Errorf("parse annotation:[%s] to time failed.", EvictLeaderBeginTime)
+ klog.Errorf("parse annotation:[%s] to time failed.", EvictLeaderBeginTime)
return false
}
if time.Now().After(evictLeaderBeginTime.Add(EvictLeaderTimeout)) {
@@ -185,11 +186,11 @@ func (tku *tikvUpgrader) beginEvictLeader(tc *v1alpha1.TidbCluster, storeID uint
podName := pod.GetName()
err := controller.GetPDClient(tku.pdControl, tc).BeginEvictLeader(storeID)
if err != nil {
- glog.Errorf("tikv upgrader: failed to begin evict leader: %d, %s/%s, %v",
+ klog.Errorf("tikv upgrader: failed to begin evict leader: %d, %s/%s, %v",
storeID, ns, podName, err)
return err
}
- glog.Infof("tikv upgrader: begin evict leader: %d, %s/%s successfully", storeID, ns, podName)
+ klog.Infof("tikv upgrader: begin evict leader: %d, %s/%s successfully", storeID, ns, podName)
if pod.Annotations == nil {
pod.Annotations = map[string]string{}
}
@@ -197,11 +198,11 @@ func (tku *tikvUpgrader) beginEvictLeader(tc *v1alpha1.TidbCluster, storeID uint
pod.Annotations[EvictLeaderBeginTime] = now
_, err = tku.podControl.UpdatePod(tc, pod)
if err != nil {
- glog.Errorf("tikv upgrader: failed to set pod %s/%s annotation %s to %s, %v",
+ klog.Errorf("tikv upgrader: failed to set pod %s/%s annotation %s to %s, %v",
ns, podName, EvictLeaderBeginTime, now, err)
return err
}
- glog.Infof("tikv upgrader: set pod %s/%s annotation %s to %s successfully",
+ klog.Infof("tikv upgrader: set pod %s/%s annotation %s to %s successfully",
ns, podName, EvictLeaderBeginTime, now)
return nil
}
@@ -219,10 +220,10 @@ func (tku *tikvUpgrader) endEvictLeader(tc *v1alpha1.TidbCluster, ordinal int32)
err = tku.pdControl.GetPDClient(pdapi.Namespace(tc.GetNamespace()), tc.GetName(), tc.IsTLSClusterEnabled()).EndEvictLeader(storeID)
if err != nil {
- glog.Errorf("tikv upgrader: failed to end evict leader storeID: %d ordinal: %d, %v", storeID, ordinal, err)
+ klog.Errorf("tikv upgrader: failed to end evict leader storeID: %d ordinal: %d, %v", storeID, ordinal, err)
return err
}
- glog.Infof("tikv upgrader: end evict leader storeID: %d ordinal: %d successfully", storeID, ordinal)
+ klog.Infof("tikv upgrader: end evict leader storeID: %d ordinal: %d successfully", storeID, ordinal)
return nil
}
diff --git a/pkg/manager/member/utils.go b/pkg/manager/member/utils.go
index ee251127b6..c631b4357b 100644
--- a/pkg/manager/member/utils.go
+++ b/pkg/manager/member/utils.go
@@ -24,11 +24,12 @@ import (
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -71,7 +72,7 @@ func statefulSetIsUpgrading(set *apps.StatefulSet) bool {
// SetStatefulSetLastAppliedConfigAnnotation set last applied config to Statefulset's annotation
func SetStatefulSetLastAppliedConfigAnnotation(set *apps.StatefulSet) error {
- setApply, err := encode(set.Spec)
+ setApply, err := util.Encode(set.Spec)
if err != nil {
return err
}
@@ -97,14 +98,6 @@ func GetLastAppliedConfig(set *apps.StatefulSet) (*apps.StatefulSetSpec, *corev1
return spec, &spec.Template.Spec, nil
}
-func encode(obj interface{}) (string, error) {
- b, err := json.Marshal(obj)
- if err != nil {
- return "", err
- }
- return string(b), nil
-}
-
// statefulSetEqual compares the new Statefulset's spec with old Statefulset's last applied config
func statefulSetEqual(new apps.StatefulSet, old apps.StatefulSet) bool {
if !apiequality.Semantic.DeepEqual(new.Annotations, old.Annotations) {
@@ -114,7 +107,7 @@ func statefulSetEqual(new apps.StatefulSet, old apps.StatefulSet) bool {
if lastAppliedConfig, ok := old.Annotations[LastAppliedConfigAnnotation]; ok {
err := json.Unmarshal([]byte(lastAppliedConfig), &oldConfig)
if err != nil {
- glog.Errorf("unmarshal Statefulset: [%s/%s]'s applied config failed,error: %v", old.GetNamespace(), old.GetName(), err)
+ klog.Errorf("unmarshal Statefulset: [%s/%s]'s applied config failed,error: %v", old.GetNamespace(), old.GetName(), err)
return false
}
return apiequality.Semantic.DeepEqual(oldConfig.Replicas, new.Spec.Replicas) &&
@@ -131,7 +124,7 @@ func templateEqual(new *apps.StatefulSet, old *apps.StatefulSet) bool {
if ok {
err := json.Unmarshal([]byte(lastAppliedConfig), &oldStsSpec)
if err != nil {
- glog.Errorf("unmarshal PodTemplate: [%s/%s]'s applied config failed,error: %v", old.GetNamespace(), old.GetName(), err)
+ klog.Errorf("unmarshal PodTemplate: [%s/%s]'s applied config failed,error: %v", old.GetNamespace(), old.GetName(), err)
return false
}
return apiequality.Semantic.DeepEqual(oldStsSpec.Template.Spec, new.Spec.Template.Spec)
@@ -142,7 +135,7 @@ func templateEqual(new *apps.StatefulSet, old *apps.StatefulSet) bool {
// setUpgradePartition set statefulSet's rolling update partition
func setUpgradePartition(set *apps.StatefulSet, upgradeOrdinal int32) {
set.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateStatefulSetStrategy{Partition: &upgradeOrdinal}
- glog.Infof("set %s/%s partition to %d", set.GetNamespace(), set.GetName(), upgradeOrdinal)
+ klog.Infof("set %s/%s partition to %d", set.GetNamespace(), set.GetName(), upgradeOrdinal)
}
func imagePullFailed(pod *corev1.Pod) bool {
@@ -216,6 +209,10 @@ func MarshalTOML(v interface{}) ([]byte, error) {
return data, nil
}
+func UnmarshalTOML(b []byte, obj interface{}) error {
+ return toml.Unmarshal(b, obj)
+}
+
func Sha256Sum(v interface{}) (string, error) {
data, err := json.Marshal(v)
if err != nil {
@@ -271,6 +268,12 @@ func MapContainers(podSpec *corev1.PodSpec) map[string]corev1.Container {
// updateStatefulSet is a template function to update the statefulset of components
func updateStatefulSet(setCtl controller.StatefulSetControlInterface, tc *v1alpha1.TidbCluster, newSet, oldSet *apps.StatefulSet) error {
isOrphan := metav1.GetControllerOf(oldSet) == nil
+ if newSet.Annotations == nil {
+ newSet.Annotations = map[string]string{}
+ }
+ if oldSet.Annotations == nil {
+ oldSet.Annotations = map[string]string{}
+ }
if !statefulSetEqual(*newSet, *oldSet) || isOrphan {
set := *oldSet
// Retain the deprecated last applied pod template annotation for backward compatibility
@@ -284,6 +287,10 @@ func updateStatefulSet(setCtl controller.StatefulSetControlInterface, tc *v1alph
set.Spec.Template.Annotations[LastAppliedConfigAnnotation] = podConfig
}
set.Annotations = newSet.Annotations
+ v, ok := oldSet.Annotations[label.AnnStsLastSyncTimestamp]
+ if ok {
+ set.Annotations[label.AnnStsLastSyncTimestamp] = v
+ }
*set.Spec.Replicas = *newSet.Spec.Replicas
set.Spec.UpdateStrategy = newSet.Spec.UpdateStrategy
if isOrphan {
@@ -300,3 +307,17 @@ func updateStatefulSet(setCtl controller.StatefulSetControlInterface, tc *v1alph
return nil
}
+
+func clusterSecretName(tc *v1alpha1.TidbCluster, component string) string {
+ return fmt.Sprintf("%s-%s-cluster-secret", tc.Name, component)
+}
+
+// filterContainer returns the container with the given name from the StatefulSet's pod template, or nil if no such container exists
+func filterContainer(sts *apps.StatefulSet, containerName string) *corev1.Container {
+ for _, c := range sts.Spec.Template.Spec.Containers {
+ if c.Name == containerName {
+ return &c
+ }
+ }
+ return nil
+}
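
A note on the last-applied-config helpers this file keeps relying on: the operator serializes the spec it applied into an annotation and, on the next sync, compares the desired spec against that snapshot rather than against fields the API server may have defaulted. A stripped-down sketch of the pattern with a toy spec type (the type and annotation names here are illustrative, not the operator's):

package main

import (
    "encoding/json"
    "fmt"
    "reflect"
)

const lastAppliedConfigAnnotation = "example.com/last-applied-config"

type toySpec struct {
    Replicas int32             `json:"replicas"`
    Image    string            `json:"image"`
    Extra    map[string]string `json:"extra,omitempty"`
}

// setLastApplied records the spec we are about to apply on the object's annotations.
func setLastApplied(annotations map[string]string, spec toySpec) error {
    b, err := json.Marshal(spec)
    if err != nil {
        return err
    }
    annotations[lastAppliedConfigAnnotation] = string(b)
    return nil
}

// specEqual compares the desired spec against the last applied snapshot,
// ignoring anything a server-side defaulter may have added since.
func specEqual(annotations map[string]string, desired toySpec) bool {
    raw, ok := annotations[lastAppliedConfigAnnotation]
    if !ok {
        return false
    }
    var applied toySpec
    if err := json.Unmarshal([]byte(raw), &applied); err != nil {
        return false
    }
    return reflect.DeepEqual(applied, desired)
}

func main() {
    ann := map[string]string{}
    spec := toySpec{Replicas: 3, Image: "pingcap/tikv:v3.0.8"}
    _ = setLastApplied(ann, spec)
    fmt.Println(specEqual(ann, spec))                                    // true
    fmt.Println(specEqual(ann, toySpec{Replicas: 5, Image: spec.Image})) // false
}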
diff --git a/pkg/monitor/monitor/monitor_manager.go b/pkg/monitor/monitor/monitor_manager.go
index bfe2a2d58f..7fafa365a5 100644
--- a/pkg/monitor/monitor/monitor_manager.go
+++ b/pkg/monitor/monitor/monitor_manager.go
@@ -20,7 +20,11 @@ import (
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
+ utildiscovery "github.com/pingcap/tidb-operator/pkg/util/discovery"
corev1 "k8s.io/api/core/v1"
+ rbac "k8s.io/api/rbac/v1"
+ "k8s.io/client-go/discovery"
+ discoverycachedmemory "k8s.io/client-go/discovery/cached/memory"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
appslisters "k8s.io/client-go/listers/apps/v1"
@@ -30,12 +34,13 @@ import (
)
type MonitorManager struct {
- typedControl controller.TypedControlInterface
- deploymentLister appslisters.DeploymentLister
- tcLister v1alpha1listers.TidbClusterLister
- pvLister corelisters.PersistentVolumeLister
- pvControl controller.PVControlInterface
- recorder record.EventRecorder
+ discoveryInterface discovery.CachedDiscoveryInterface
+ typedControl controller.TypedControlInterface
+ deploymentLister appslisters.DeploymentLister
+ tcLister v1alpha1listers.TidbClusterLister
+ pvLister corelisters.PersistentVolumeLister
+ pvControl controller.PVControlInterface
+ recorder record.EventRecorder
}
const (
@@ -52,12 +57,13 @@ func NewMonitorManager(
pvcLister := kubeInformerFactory.Core().V1().PersistentVolumeClaims().Lister()
pvLister := kubeInformerFactory.Core().V1().PersistentVolumes().Lister()
return &MonitorManager{
- typedControl: typedControl,
- deploymentLister: kubeInformerFactory.Apps().V1().Deployments().Lister(),
- tcLister: informerFactory.Pingcap().V1alpha1().TidbClusters().Lister(),
- pvControl: controller.NewRealPVControl(kubeCli, pvcLister, pvLister, recorder),
- pvLister: pvLister,
- recorder: recorder,
+ discoveryInterface: discoverycachedmemory.NewMemCacheClient(kubeCli.Discovery()),
+ typedControl: typedControl,
+ deploymentLister: kubeInformerFactory.Apps().V1().Deployments().Lister(),
+ tcLister: informerFactory.Pingcap().V1alpha1().TidbClusters().Lister(),
+ pvControl: controller.NewRealPVControl(kubeCli, pvcLister, pvLister, recorder),
+ pvLister: pvLister,
+ recorder: recorder,
}
}
@@ -105,8 +111,8 @@ func (mm *MonitorManager) Sync(monitor *v1alpha1.TidbMonitor) error {
}
func (mm *MonitorManager) syncTidbMonitorService(monitor *v1alpha1.TidbMonitor) error {
- service := getMonitorService(monitor)
- for _, svc := range service {
+ services := getMonitorService(monitor)
+ for _, svc := range services {
_, err := mm.typedControl.CreateOrUpdateService(monitor, svc)
if err != nil {
klog.Errorf("tm[%s/%s]'s service[%s] failed to sync,err: %v", monitor.Namespace, monitor.Name, svc.Name, err)
@@ -147,6 +153,10 @@ func (mm *MonitorManager) syncTidbMonitorDeployment(monitor *v1alpha1.TidbMonito
}
targetTcRef := monitor.Spec.Clusters[0]
+ if len(targetTcRef.Namespace) < 1 {
+ targetTcRef.Namespace = monitor.Namespace
+ }
+
tc, err := mm.tcLister.TidbClusters(targetTcRef.Namespace).Get(targetTcRef.Name)
if err != nil {
return err
@@ -169,7 +179,11 @@ func (mm *MonitorManager) syncTidbMonitorDeployment(monitor *v1alpha1.TidbMonito
return err
}
- deployment := getMonitorDeployment(sa, cm, secret, monitor, tc)
+ deployment, err := getMonitorDeployment(sa, cm, secret, monitor, tc)
+ if err != nil {
+ klog.Errorf("tm[%s/%s]'s deployment failed to generate,err: %v", monitor.Namespace, monitor.Name, err)
+ return err
+ }
_, err = mm.typedControl.CreateOrUpdateDeployment(monitor, deployment)
if err != nil {
klog.Errorf("tm[%s/%s]'s deployment failed to sync,err: %v", monitor.Namespace, monitor.Name, err)
@@ -203,8 +217,31 @@ func (mm *MonitorManager) syncTidbMonitorRbac(monitor *v1alpha1.TidbMonitor) (*c
klog.Errorf("tm[%s/%s]'s serviceaccount failed to sync,err: %v", monitor.Namespace, monitor.Name, err)
return nil, err
}
+ policyRules := []rbac.PolicyRule{
+ {
+ APIGroups: []string{""},
+ Resources: []string{"pods"},
+ Verbs: []string{"get", "list", "watch"},
+ },
+ }
+ if supported, err := utildiscovery.IsAPIGroupVersionSupported(mm.discoveryInterface, "security.openshift.io/v1"); err != nil {
+ return nil, err
+ } else if supported {
+ // We must use 'anyuid' SecurityContextConstraint to run our container as root.
+ // https://docs.openshift.com/container-platform/4.3/authentication/managing-security-context-constraints.html
+ policyRules = append(policyRules, rbac.PolicyRule{
+ APIGroups: []string{"security.openshift.io"},
+ ResourceNames: []string{"anyuid"},
+ Resources: []string{"securitycontextconstraints"},
+ Verbs: []string{"use"},
+ })
+ }
if controller.ClusterScoped {
- cr := getMonitorClusterRole(monitor)
+ policyRules = append(policyRules, rbac.PolicyRule{
+ NonResourceURLs: []string{"/metrics"},
+ Verbs: []string{"get"},
+ })
+ cr := getMonitorClusterRole(monitor, policyRules)
cr, err = mm.typedControl.CreateOrUpdateClusterRole(monitor, cr)
if err != nil {
klog.Errorf("tm[%s/%s]'s clusterrole failed to sync,err: %v", monitor.Namespace, monitor.Name, err)
@@ -219,7 +256,7 @@ func (mm *MonitorManager) syncTidbMonitorRbac(monitor *v1alpha1.TidbMonitor) (*c
return sa, nil
}
- role := getMonitorRole(monitor)
+ role := getMonitorRole(monitor, policyRules)
role, err = mm.typedControl.CreateOrUpdateRole(monitor, role)
if err != nil {
klog.Errorf("tm[%s/%s]'s role failed to sync,err: %v", monitor.Namespace, monitor.Name, err)
diff --git a/pkg/monitor/monitor/template.go b/pkg/monitor/monitor/template.go
index 3de9ae28e1..7cde743fda 100644
--- a/pkg/monitor/monitor/template.go
+++ b/pkg/monitor/monitor/template.go
@@ -14,12 +14,16 @@
package monitor
import (
- "time"
-
+ "fmt"
+ "github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
"gopkg.in/yaml.v2"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/klog"
+ "path"
+ "time"
)
const (
@@ -32,9 +36,6 @@ const (
podNameLabel = "__meta_kubernetes_pod_name"
nodeNameLabel = "__meta_kubernetes_pod_node_name"
podIPLabel = "__meta_kubernetes_pod_ip"
- caFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- certFilePath = "/var/lib/pd-client-tls/cert"
- keyFilePath = "/var/lib/pd-client-tls/key"
)
var (
@@ -44,6 +45,7 @@ var (
tikvPattern config.Regexp
pdPattern config.Regexp
tidbPattern config.Regexp
+ addressPattern config.Regexp
dashBoardConfig = `{
"apiVersion": 1,
"providers": [
@@ -74,7 +76,7 @@ func init() {
if err != nil {
klog.Fatalf("monitor regex template parse error,%v", err)
}
- tikvPattern, err = config.NewRegexp(".*\\-tikv\\-\\d*$")
+ tikvPattern, err = config.NewRegexp("tikv")
if err != nil {
klog.Fatalf("monitor regex template parse error,%v", err)
}
@@ -86,6 +88,10 @@ func init() {
if err != nil {
klog.Fatalf("monitor regex template parse error,%v", err)
}
+ addressPattern, err = config.NewRegexp("(.+);(.+);(.+)")
+ if err != nil {
+ klog.Fatalf("monitor regex template parse error,%v", err)
+ }
}
type MonitorConfigModel struct {
@@ -97,11 +103,6 @@ type MonitorConfigModel struct {
func newPrometheusConfig(cmodel *MonitorConfigModel) *config.Config {
var c = config.Config{
- AlertingConfig: config.AlertingConfig{
- AlertRelabelConfigs: nil,
- AlertmanagerConfigs: nil,
- XXX: nil,
- },
GlobalConfig: config.GlobalConfig{
ScrapeInterval: model.Duration(15 * time.Second),
EvaluationInterval: model.Duration(15 * time.Second),
@@ -119,10 +120,35 @@ func newPrometheusConfig(cmodel *MonitorConfigModel) *config.Config {
}
func scrapeJob(name string, componentPattern config.Regexp, cmodel *MonitorConfigModel) *config.ScrapeConfig {
+
+ addressRelabelConfig := &config.RelabelConfig{
+ SourceLabels: model.LabelNames{
+ "__address__",
+ ioPortLabel,
+ },
+ Action: config.RelabelReplace,
+ Regex: portPattern,
+ Replacement: "$1:$2",
+ TargetLabel: "__address__",
+ }
+ if name == label.PDLabelVal || name == label.TiDBLabelVal || name == label.TiKVLabelVal {
+ addressRelabelConfig = &config.RelabelConfig{
+ SourceLabels: model.LabelNames{
+ podNameLabel,
+ instanceLabel,
+ ioPortLabel,
+ },
+ Action: config.RelabelReplace,
+ Regex: addressPattern,
+ Replacement: fmt.Sprintf("$1.$2-%s-peer:$3", name),
+ TargetLabel: "__address__",
+ }
+ }
return &config.ScrapeConfig{
JobName: name,
ScrapeInterval: model.Duration(15 * time.Second),
+ Scheme: "http",
HonorLabels: true,
ServiceDiscoveryConfig: config.ServiceDiscoveryConfig{
KubernetesSDConfigs: []*config.KubernetesSDConfig{
@@ -138,9 +164,6 @@ func scrapeJob(name string, componentPattern config.Regexp, cmodel *MonitorConfi
TLSConfig: config.TLSConfig{
InsecureSkipVerify: true,
},
- XXX: map[string]interface{}{
- "scheme": "http",
- },
},
RelabelConfigs: []*config.RelabelConfig{
{
@@ -172,16 +195,7 @@ func scrapeJob(name string, componentPattern config.Regexp, cmodel *MonitorConfi
TargetLabel: "__metrics_path__",
Regex: allMatchPattern,
},
- {
- SourceLabels: model.LabelNames{
- "__address__",
- ioPortLabel,
- },
- Action: config.RelabelReplace,
- Regex: portPattern,
- Replacement: "$1:$2",
- TargetLabel: "__address__",
- },
+ addressRelabelConfig,
{
SourceLabels: model.LabelNames{
namespaceLabel,
@@ -230,9 +244,7 @@ func addAlertManagerUrl(pc *config.Config, cmodel *MonitorConfigModel) {
StaticConfigs: []*config.TargetGroup{
{
Targets: []model.LabelSet{
- map[model.LabelName]model.LabelValue{
- "targets": model.LabelValue(cmodel.AlertmanagerURL),
- },
+ {model.AddressLabel: model.LabelValue(cmodel.AlertmanagerURL)},
},
},
},
@@ -245,16 +257,14 @@ func addAlertManagerUrl(pc *config.Config, cmodel *MonitorConfigModel) {
func addTlsConfig(pc *config.Config) {
for id, sconfig := range pc.ScrapeConfigs {
- // TiKV doesn't support scheme https for now.
- // And we should fix it after TiKV fix this issue: https://github.com/tikv/tikv/issues/5340
- if sconfig.JobName == "pd" || sconfig.JobName == "tidb" {
+ if sconfig.JobName == "pd" || sconfig.JobName == "tidb" || sconfig.JobName == "tikv" {
sconfig.HTTPClientConfig.TLSConfig = config.TLSConfig{
- CAFile: caFilePath,
- CertFile: certFilePath,
- KeyFile: keyFilePath,
+ CAFile: path.Join(util.ClusterClientTLSPath, corev1.ServiceAccountRootCAKey),
+ CertFile: path.Join(util.ClusterClientTLSPath, corev1.TLSCertKey),
+ KeyFile: path.Join(util.ClusterClientTLSPath, corev1.TLSPrivateKeyKey),
}
pc.ScrapeConfigs[id] = sconfig
- sconfig.HTTPClientConfig.XXX["scheme"] = "https"
+ sconfig.Scheme = "https"
}
}
}
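
To make the new address relabeling concrete: Prometheus joins the source labels with ";" before matching, so a TiKV pod named basic-tikv-0 in a cluster whose app.kubernetes.io/instance label is basic, annotated with prometheus.io/port=20180, is scraped at basic-tikv-0.basic-tikv-peer:20180 instead of the pod IP (the pod, cluster, and port values are made-up examples). The same substitution, sketched with the standard regexp package:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Prometheus concatenates the source label values with ";" before matching.
    joined := "basic-tikv-0;basic;20180" // pod name ; app.kubernetes.io/instance ; prometheus.io/port
    re := regexp.MustCompile(`(.+);(.+);(.+)`)
    addr := re.ReplaceAllString(joined, "$1.$2-tikv-peer:$3")
    fmt.Println(addr) // basic-tikv-0.basic-tikv-peer:20180
}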
diff --git a/pkg/monitor/monitor/template_test.go b/pkg/monitor/monitor/template_test.go
index 9a052ccf2e..6ae8fb3692 100644
--- a/pkg/monitor/monitor/template_test.go
+++ b/pkg/monitor/monitor/template_test.go
@@ -25,12 +25,18 @@ func TestRenderPrometheusConfig(t *testing.T) {
expectedContent := `global:
scrape_interval: 15s
evaluation_interval: 15s
+alerting:
+ alertmanagers:
+ - static_configs:
+ - targets:
+ - alert-url
rule_files:
- /prometheus-rules/rules/*.rules.yml
scrape_configs:
- job_name: pd
honor_labels: true
scrape_interval: 15s
+ scheme: http
kubernetes_sd_configs:
- api_server: null
role: pod
@@ -54,10 +60,11 @@ scrape_configs:
regex: (.+)
target_label: __metrics_path__
action: replace
- - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
- regex: ([^:]+)(?::\d+)?;(\d+)
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
target_label: __address__
- replacement: $1:$2
+ replacement: $1.$2-pd-peer:$3
action: replace
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
@@ -77,6 +84,7 @@ scrape_configs:
- job_name: tidb
honor_labels: true
scrape_interval: 15s
+ scheme: http
kubernetes_sd_configs:
- api_server: null
role: pod
@@ -100,10 +108,11 @@ scrape_configs:
regex: (.+)
target_label: __metrics_path__
action: replace
- - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
- regex: ([^:]+)(?::\d+)?;(\d+)
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
target_label: __address__
- replacement: $1:$2
+ replacement: $1.$2-tidb-peer:$3
action: replace
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
@@ -123,6 +132,7 @@ scrape_configs:
- job_name: tikv
honor_labels: true
scrape_interval: 15s
+ scheme: http
kubernetes_sd_configs:
- api_server: null
role: pod
@@ -137,7 +147,7 @@ scrape_configs:
regex: target
action: keep
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_component]
- regex: .*\-tikv\-\d*$
+ regex: tikv
action: keep
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
regex: "true"
@@ -146,10 +156,11 @@ scrape_configs:
regex: (.+)
target_label: __metrics_path__
action: replace
- - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
- regex: ([^:]+)(?::\d+)?;(\d+)
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
target_label: __address__
- replacement: $1:$2
+ replacement: $1.$2-tikv-peer:$3
action: replace
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
@@ -174,6 +185,183 @@ scrape_configs:
"ns2",
},
EnableTLSCluster: false,
+ AlertmanagerURL: "alert-url",
+ }
+ content, err := RenderPrometheusConfig(model)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(content).Should(Equal(expectedContent))
+}
+
+func TestRenderPrometheusConfigTLSEnabled(t *testing.T) {
+ g := NewGomegaWithT(t)
+ target, _ := config.NewRegexp("target")
+ expectedContent := `global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+rule_files:
+- /prometheus-rules/rules/*.rules.yml
+scrape_configs:
+- job_name: pd
+ honor_labels: true
+ scrape_interval: 15s
+ scheme: https
+ kubernetes_sd_configs:
+ - api_server: null
+ role: pod
+ namespaces:
+ names:
+ - ns1
+ - ns2
+ tls_config:
+ ca_file: /var/lib/cluster-client-tls/ca.crt
+ cert_file: /var/lib/cluster-client-tls/tls.crt
+ key_file: /var/lib/cluster-client-tls/tls.key
+ insecure_skip_verify: false
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ regex: target
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_component]
+ regex: pd
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+ regex: "true"
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+ regex: (.+)
+ target_label: __metrics_path__
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
+ target_label: __address__
+ replacement: $1.$2-pd-peer:$3
+ action: replace
+ - source_labels: [__meta_kubernetes_namespace]
+ target_label: kubernetes_namespace
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name]
+ target_label: instance
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ target_label: cluster
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name]
+ target_label: instance
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ target_label: cluster
+ action: replace
+- job_name: tidb
+ honor_labels: true
+ scrape_interval: 15s
+ scheme: https
+ kubernetes_sd_configs:
+ - api_server: null
+ role: pod
+ namespaces:
+ names:
+ - ns1
+ - ns2
+ tls_config:
+ ca_file: /var/lib/cluster-client-tls/ca.crt
+ cert_file: /var/lib/cluster-client-tls/tls.crt
+ key_file: /var/lib/cluster-client-tls/tls.key
+ insecure_skip_verify: false
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ regex: target
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_component]
+ regex: tidb
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+ regex: "true"
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+ regex: (.+)
+ target_label: __metrics_path__
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
+ target_label: __address__
+ replacement: $1.$2-tidb-peer:$3
+ action: replace
+ - source_labels: [__meta_kubernetes_namespace]
+ target_label: kubernetes_namespace
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name]
+ target_label: instance
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ target_label: cluster
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name]
+ target_label: instance
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ target_label: cluster
+ action: replace
+- job_name: tikv
+ honor_labels: true
+ scrape_interval: 15s
+ scheme: https
+ kubernetes_sd_configs:
+ - api_server: null
+ role: pod
+ namespaces:
+ names:
+ - ns1
+ - ns2
+ tls_config:
+ ca_file: /var/lib/cluster-client-tls/ca.crt
+ cert_file: /var/lib/cluster-client-tls/tls.crt
+ key_file: /var/lib/cluster-client-tls/tls.key
+ insecure_skip_verify: false
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ regex: target
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_component]
+ regex: tikv
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+ regex: "true"
+ action: keep
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+ regex: (.+)
+ target_label: __metrics_path__
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name, __meta_kubernetes_pod_label_app_kubernetes_io_instance,
+ __meta_kubernetes_pod_annotation_prometheus_io_port]
+ regex: (.+);(.+);(.+)
+ target_label: __address__
+ replacement: $1.$2-tikv-peer:$3
+ action: replace
+ - source_labels: [__meta_kubernetes_namespace]
+ target_label: kubernetes_namespace
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name]
+ target_label: instance
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ target_label: cluster
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_name]
+ target_label: instance
+ action: replace
+ - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
+ target_label: cluster
+ action: replace
+`
+ model := &MonitorConfigModel{
+ ReleaseTargetRegex: &target,
+ ReleaseNamespaces: []string{
+ "ns1",
+ "ns2",
+ },
+ EnableTLSCluster: true,
}
content, err := RenderPrometheusConfig(model)
g.Expect(err).NotTo(HaveOccurred())
diff --git a/pkg/monitor/monitor/util.go b/pkg/monitor/monitor/util.go
index ddfc1b143f..8e5b18ff68 100644
--- a/pkg/monitor/monitor/util.go
+++ b/pkg/monitor/monitor/util.go
@@ -14,12 +14,15 @@
package monitor
import (
+ "encoding/json"
"fmt"
+ "sort"
"strconv"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
"github.com/prometheus/prometheus/config"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
@@ -34,6 +37,10 @@ func GetMonitorObjectName(monitor *v1alpha1.TidbMonitor) string {
return fmt.Sprintf("%s-monitor", monitor.Name)
}
+func buildTidbMonitorLabel(name string) map[string]string {
+ return label.NewMonitor().Instance(name).Monitor().Labels()
+}
+
// getMonitorConfigMap generate the Prometheus config and Grafana config for TidbMonitor,
// If the namespace in ClusterRef is empty, we would set the TidbMonitor's namespace in the default
func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor) (*core.ConfigMap, error) {
@@ -67,12 +74,11 @@ func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor
return nil, err
}
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
cm := &core.ConfigMap{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Data: map[string]string{
@@ -86,12 +92,11 @@ func getMonitorConfigMap(tc *v1alpha1.TidbCluster, monitor *v1alpha1.TidbMonitor
}
func getMonitorSecret(monitor *v1alpha1.TidbMonitor) *core.Secret {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &core.Secret{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Data: map[string][]byte{
@@ -102,67 +107,47 @@ func getMonitorSecret(monitor *v1alpha1.TidbMonitor) *core.Secret {
}
func getMonitorServiceAccount(monitor *v1alpha1.TidbMonitor) *core.ServiceAccount {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
sa := &core.ServiceAccount{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
}
return sa
}
-func getMonitorClusterRole(monitor *v1alpha1.TidbMonitor) *rbac.ClusterRole {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
+func getMonitorClusterRole(monitor *v1alpha1.TidbMonitor, policyRules []rbac.PolicyRule) *rbac.ClusterRole {
return &rbac.ClusterRole{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
- Rules: []rbac.PolicyRule{
- {
- APIGroups: []string{""},
- Resources: []string{"pods"},
- Verbs: []string{"get", "list", "watch"},
- },
- {
- NonResourceURLs: []string{"/metrics"},
- Verbs: []string{"get"},
- },
- },
+ Rules: policyRules,
}
}
-func getMonitorRole(monitor *v1alpha1.TidbMonitor) *rbac.Role {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
+func getMonitorRole(monitor *v1alpha1.TidbMonitor, policyRules []rbac.PolicyRule) *rbac.Role {
return &rbac.Role{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
- Rules: []rbac.PolicyRule{
- {
- APIGroups: []string{""},
- Resources: []string{"pods"},
- Verbs: []string{"get", "list", "watch"},
- },
- },
+ Rules: policyRules,
}
}
func getMonitorClusterRoleBinding(sa *core.ServiceAccount, cr *rbac.ClusterRole, monitor *v1alpha1.TidbMonitor) *rbac.ClusterRoleBinding {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.ClusterRoleBinding{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Subjects: []rbac.Subject{
@@ -182,12 +167,11 @@ func getMonitorClusterRoleBinding(sa *core.ServiceAccount, cr *rbac.ClusterRole,
}
func getMonitorRoleBinding(sa *core.ServiceAccount, role *rbac.Role, monitor *v1alpha1.TidbMonitor) *rbac.RoleBinding {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
return &rbac.RoleBinding{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
},
Subjects: []rbac.Subject{
@@ -206,7 +190,7 @@ func getMonitorRoleBinding(sa *core.ServiceAccount, role *rbac.Role, monitor *v1
}
}
-func getMonitorDeployment(sa *core.ServiceAccount, config *core.ConfigMap, secret *core.Secret, monitor *v1alpha1.TidbMonitor, tc *v1alpha1.TidbCluster) *apps.Deployment {
+func getMonitorDeployment(sa *core.ServiceAccount, config *core.ConfigMap, secret *core.Secret, monitor *v1alpha1.TidbMonitor, tc *v1alpha1.TidbCluster) (*apps.Deployment, error) {
deployment := getMonitorDeploymentSkeleton(sa, monitor)
initContainer := getMonitorInitContainer(monitor, tc)
deployment.Spec.Template.Spec.InitContainers = append(deployment.Spec.Template.Spec.InitContainers, initContainer)
@@ -219,19 +203,25 @@ func getMonitorDeployment(sa *core.ServiceAccount, config *core.ConfigMap, secre
}
volumes := getMonitorVolumes(config, monitor, tc)
deployment.Spec.Template.Spec.Volumes = volumes
- return deployment
+ b, err := json.Marshal(deployment.Spec.Template.Spec)
+ if err != nil {
+ return nil, err
+ }
+ if deployment.Annotations == nil {
+ deployment.Annotations = map[string]string{}
+ }
+ deployment.Annotations[controller.LastAppliedPodTemplate] = string(b)
+ return deployment, nil
}
func getMonitorDeploymentSkeleton(sa *core.ServiceAccount, monitor *v1alpha1.TidbMonitor) *apps.Deployment {
- monitorLabel := label.New().Instance(monitor.Name).Monitor()
replicas := int32(1)
- labels := label.NewMonitor().Instance(monitor.Name).Monitor()
deployment := &apps.Deployment{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Annotations,
},
@@ -241,11 +231,11 @@ func getMonitorDeploymentSkeleton(sa *core.ServiceAccount, monitor *v1alpha1.Tid
Type: apps.RecreateDeploymentStrategyType,
},
Selector: &meta.LabelSelector{
- MatchLabels: labels,
+ MatchLabels: buildTidbMonitorLabel(monitor.Name),
},
Template: core.PodTemplateSpec{
ObjectMeta: meta.ObjectMeta{
- Labels: labels,
+ Labels: buildTidbMonitorLabel(monitor.Name),
},
Spec: core.PodSpec{
@@ -417,8 +407,8 @@ func getMonitorPrometheusContainer(monitor *v1alpha1.TidbMonitor, tc *v1alpha1.T
if tc.IsTLSClusterEnabled() {
c.VolumeMounts = append(c.VolumeMounts, core.VolumeMount{
- Name: "tls-pd-client",
- MountPath: "/var/lib/pd-client-tls",
+ Name: "cluster-client-tls",
+ MountPath: util.ClusterClientTLSPath,
ReadOnly: true,
})
}
@@ -503,6 +493,7 @@ func getMonitorGrafanaContainer(secret *core.Secret, monitor *v1alpha1.TidbMonit
if monitor.Spec.Grafana.ImagePullPolicy != nil {
c.ImagePullPolicy = *monitor.Spec.Grafana.ImagePullPolicy
}
+ sort.Sort(util.SortEnvByName(c.Env))
return c
}
@@ -626,10 +617,10 @@ func getMonitorVolumes(config *core.ConfigMap, monitor *v1alpha1.TidbMonitor, tc
if tc.IsTLSClusterEnabled() {
defaultMode := int32(420)
tlsPDClient := core.Volume{
- Name: "tls-pd-client",
+ Name: "cluster-client-tls",
VolumeSource: core.VolumeSource{
Secret: &core.SecretVolumeSource{
- SecretName: fmt.Sprintf("%s-pd-client", tc.Name),
+ SecretName: util.ClusterClientTLSSecretName(tc.Name),
DefaultMode: &defaultMode,
},
},
@@ -641,13 +632,20 @@ func getMonitorVolumes(config *core.ConfigMap, monitor *v1alpha1.TidbMonitor, tc
func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
var services []*core.Service
- monitorLabel := label.New().Instance(monitor.Name).Monitor()
- labels := label.NewMonitor().Instance(monitor.Name).Monitor()
reloaderPortName := "tcp-reloader"
prometheusPortName := "http-prometheus"
grafanaPortName := "http-grafana"
+ // Currently the monitor labels do not carry the managedBy label because of a v1.0 historical issue.
+ // To stay compatible with monitors deployed by the v1.0 release, the managedBy label is omitted from the selector for now.
+ // The managedBy label key is planned to be added in the v1.2 release.
+ selector := map[string]string{
+ label.InstanceLabelKey: monitor.Name,
+ label.NameLabelKey: "tidb-cluster",
+ label.ComponentLabelKey: label.TiDBMonitorVal,
+ }
+
if monitor.BaseReloaderSpec().PortName() != nil {
reloaderPortName = *monitor.BaseReloaderSpec().PortName()
}
@@ -658,11 +656,12 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
grafanaPortName = *monitor.BaseGrafanaSpec().PortName()
}
+ prometheusName := fmt.Sprintf("%s-prometheus", monitor.Name)
prometheusService := &core.Service{
ObjectMeta: meta.ObjectMeta{
- Name: fmt.Sprintf("%s-prometheus", monitor.Name),
+ Name: prometheusName,
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Prometheus.Service.Annotations,
},
@@ -676,15 +675,20 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
},
},
Type: monitor.Spec.Prometheus.Service.Type,
- Selector: labels,
+ Selector: selector,
},
}
+ if monitor.BasePrometheusSpec().ServiceType() == core.ServiceTypeLoadBalancer {
+ if monitor.Spec.Prometheus.Service.LoadBalancerIP != nil {
+ prometheusService.Spec.LoadBalancerIP = *monitor.Spec.Prometheus.Service.LoadBalancerIP
+ }
+ }
reloaderService := &core.Service{
ObjectMeta: meta.ObjectMeta{
- Name: fmt.Sprintf("%s-reloader", monitor.Name),
+ Name: fmt.Sprintf("%s-monitor-reloader", monitor.Name),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Prometheus.Service.Annotations,
},
@@ -697,21 +701,24 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
TargetPort: intstr.FromInt(9089),
},
},
- Type: monitor.Spec.Reloader.Service.Type,
- Selector: map[string]string{
- label.InstanceLabelKey: monitor.Name,
- label.ComponentLabelKey: label.TiDBMonitorVal,
- },
+ Type: monitor.Spec.Reloader.Service.Type,
+ Selector: selector,
},
}
+ if monitor.BaseReloaderSpec().ServiceType() == core.ServiceTypeLoadBalancer {
+ if monitor.Spec.Reloader.Service.LoadBalancerIP != nil {
+ reloaderService.Spec.LoadBalancerIP = *monitor.Spec.Reloader.Service.LoadBalancerIP
+ }
+ }
+
services = append(services, prometheusService, reloaderService)
if monitor.Spec.Grafana != nil {
grafanaService := &core.Service{
ObjectMeta: meta.ObjectMeta{
Name: fmt.Sprintf("%s-grafana", monitor.Name),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: buildTidbMonitorLabel(monitor.Name),
OwnerReferences: []meta.OwnerReference{controller.GetTiDBMonitorOwnerRef(monitor)},
Annotations: monitor.Spec.Grafana.Service.Annotations,
},
@@ -724,25 +731,29 @@ func getMonitorService(monitor *v1alpha1.TidbMonitor) []*core.Service {
TargetPort: intstr.FromInt(3000),
},
},
- Type: monitor.Spec.Grafana.Service.Type,
- Selector: map[string]string{
- label.InstanceLabelKey: monitor.Name,
- label.ComponentLabelKey: label.TiDBMonitorVal,
- },
+ Type: monitor.Spec.Grafana.Service.Type,
+ Selector: selector,
},
}
+
+ if monitor.BaseGrafanaSpec().ServiceType() == core.ServiceTypeLoadBalancer {
+ if monitor.Spec.Grafana.Service.LoadBalancerIP != nil {
+ grafanaService.Spec.LoadBalancerIP = *monitor.Spec.Grafana.Service.LoadBalancerIP
+ }
+ }
+
services = append(services, grafanaService)
}
return services
}
func getMonitorPVC(monitor *v1alpha1.TidbMonitor) *core.PersistentVolumeClaim {
- monitorLabel := label.New().Instance(monitor.Name).Monitor().Labels()
+ l := buildTidbMonitorLabel(monitor.Name)
return &core.PersistentVolumeClaim{
ObjectMeta: meta.ObjectMeta{
Name: GetMonitorObjectName(monitor),
Namespace: monitor.Namespace,
- Labels: monitorLabel,
+ Labels: l,
Annotations: monitor.Spec.Annotations,
},
diff --git a/pkg/pdapi/pd_config.go b/pkg/pdapi/pd_config.go
new file mode 100644
index 0000000000..d51b60bbc3
--- /dev/null
+++ b/pkg/pdapi/pd_config.go
@@ -0,0 +1,265 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pdapi
+
+import (
+ "strconv"
+ "strings"
+)
+
+// PDConfigFromAPI is the configuration from PD API
+// +k8s:openapi-gen=true
+type PDConfigFromAPI struct {
+
+ // Log related config.
+ Log *PDLogConfig `toml:"log,omitempty" json:"log,omitempty"`
+
+ // Immutable, change should be made through pd-ctl after cluster creation
+ Schedule *PDScheduleConfig `toml:"schedule,omitempty" json:"schedule,omitempty"`
+
+ // Immutable, change should be made through pd-ctl after cluster creation
+ Replication *PDReplicationConfig `toml:"replication,omitempty" json:"replication,omitempty"`
+}
+
+// PDLogConfig serializes log related config in toml/json.
+// +k8s:openapi-gen=true
+type PDLogConfig struct {
+ // Log level.
+ // Optional: Defaults to info
+ Level string `toml:"level,omitempty" json:"level,omitempty"`
+ // Log format, one of json, text, or console.
+ Format string `toml:"format,omitempty" json:"format,omitempty"`
+ // Disable automatic timestamps in output.
+ DisableTimestamp *bool `toml:"disable-timestamp,omitempty" json:"disable-timestamp,omitempty"`
+ // File log config.
+ File *FileLogConfig `toml:"file,omitempty" json:"file,omitempty"`
+ // Development puts the logger in development mode, which changes the
+ // behavior of DPanicLevel and takes stacktraces more liberally.
+ Development *bool `toml:"development,omitempty" json:"development,omitempty"`
+ // DisableCaller stops annotating logs with the calling function's file
+ // name and line number. By default, all logs are annotated.
+ DisableCaller *bool `toml:"disable-caller,omitempty" json:"disable-caller,omitempty"`
+ // DisableStacktrace completely disables automatic stacktrace capturing. By
+ // default, stacktraces are captured for WarnLevel and above logs in
+ // development and ErrorLevel and above in production.
+ DisableStacktrace *bool `toml:"disable-stacktrace,omitempty" json:"disable-stacktrace,omitempty"`
+ // DisableErrorVerbose stops annotating logs with the full verbose error
+ // message.
+ DisableErrorVerbose *bool `toml:"disable-error-verbose,omitempty" json:"disable-error-verbose,omitempty"`
+}
+
+// PDReplicationConfig is the replication configuration.
+// +k8s:openapi-gen=true
+type PDReplicationConfig struct {
+ // MaxReplicas is the number of replicas for each region.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 3
+ MaxReplicas *uint64 `toml:"max-replicas,omitempty" json:"max-replicas,omitempty"`
+
+ // The label keys specified the location of a store.
+ // The placement priorities is implied by the order of label keys.
+ // For example, ["zone", "rack"] means that we should place replicas to
+ // different zones first, then to different racks if we don't have enough zones.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // +k8s:openapi-gen=false
+ LocationLabels StringSlice `toml:"location-labels,omitempty" json:"location-labels,omitempty"`
+ // StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocationLabels.
+ // Immutable, change should be made through pd-ctl after cluster creation.
+ // Imported from v3.1.0
+ StrictlyMatchLabel *bool `toml:"strictly-match-label,omitempty" json:"strictly-match-label,string,omitempty"`
+
+ // When PlacementRules feature is enabled. MaxReplicas and LocationLabels are not used anymore.
+ EnablePlacementRules *bool `toml:"enable-placement-rules" json:"enable-placement-rules,string,omitempty"`
+}
+
+// PDScheduleConfig is the schedule configuration.
+// +k8s:openapi-gen=true
+type PDScheduleConfig struct {
+ // If the snapshot count of one store is greater than this value,
+ // it will never be used as a source or target store.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 3
+ MaxSnapshotCount *uint64 `toml:"max-snapshot-count,omitempty" json:"max-snapshot-count,omitempty"`
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 16
+ MaxPendingPeerCount *uint64 `toml:"max-pending-peer-count,omitempty" json:"max-pending-peer-count,omitempty"`
+ // If both the size of region is smaller than MaxMergeRegionSize
+ // and the number of rows in region is smaller than MaxMergeRegionKeys,
+ // it will try to merge with adjacent regions.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 20
+ MaxMergeRegionSize *uint64 `toml:"max-merge-region-size,omitempty" json:"max-merge-region-size,omitempty"`
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 200000
+ MaxMergeRegionKeys *uint64 `toml:"max-merge-region-keys,omitempty" json:"max-merge-region-keys,omitempty"`
+ // SplitMergeInterval is the minimum interval time to permit merge after split.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 1h
+ SplitMergeInterval string `toml:"split-merge-interval,omitempty" json:"split-merge-interval,omitempty"`
+ // PatrolRegionInterval is the interval for scanning region during patrol.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ PatrolRegionInterval string `toml:"patrol-region-interval,omitempty" json:"patrol-region-interval,omitempty"`
+ // MaxStoreDownTime is the max duration after which
+ // a store will be considered to be down if it hasn't reported heartbeats.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 30m
+ MaxStoreDownTime string `toml:"max-store-down-time,omitempty" json:"max-store-down-time,omitempty"`
+ // LeaderScheduleLimit is the max coexist leader schedules.
+ // Immutable, change should be made through pd-ctl after cluster creation.
+ // Optional: Defaults to 4.
+ // Imported from v3.1.0
+ LeaderScheduleLimit *uint64 `toml:"leader-schedule-limit,omitempty" json:"leader-schedule-limit,omitempty"`
+ // RegionScheduleLimit is the max coexist region schedules.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 2048
+ RegionScheduleLimit *uint64 `toml:"region-schedule-limit,omitempty" json:"region-schedule-limit,omitempty"`
+ // ReplicaScheduleLimit is the max coexist replica schedules.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 64
+ ReplicaScheduleLimit *uint64 `toml:"replica-schedule-limit,omitempty" json:"replica-schedule-limit,omitempty"`
+ // MergeScheduleLimit is the max coexist merge schedules.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 8
+ MergeScheduleLimit *uint64 `toml:"merge-schedule-limit,omitempty" json:"merge-schedule-limit,omitempty"`
+ // HotRegionScheduleLimit is the max coexist hot region schedules.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ // Optional: Defaults to 4
+ HotRegionScheduleLimit *uint64 `toml:"hot-region-schedule-limit,omitempty" json:"hot-region-schedule-limit,omitempty"`
+ // HotRegionCacheHitsThreshold is the cache hits threshold of the hot region.
+ // If the number of times a region hits the hot cache is greater than this
+ // threshold, it is considered a hot region.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ HotRegionCacheHitsThreshold *uint64 `toml:"hot-region-cache-hits-threshold,omitempty" json:"hot-region-cache-hits-threshold,omitempty"`
+ // TolerantSizeRatio is the ratio of buffer size for balance scheduler.
+ // Immutable, change should be made through pd-ctl after cluster creation.
+ // Imported from v3.1.0
+ TolerantSizeRatio *float64 `toml:"tolerant-size-ratio,omitempty" json:"tolerant-size-ratio,omitempty"`
+ //
+ // high space stage transition stage low space stage
+ // |--------------------|-----------------------------|-------------------------|
+ // ^ ^ ^ ^
+ // 0 HighSpaceRatio * capacity LowSpaceRatio * capacity capacity
+ //
+ // LowSpaceRatio is the lowest usage ratio of a store that is regarded as low space.
+ // When in low space, store region score increases to very large and varies inversely with available size.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ LowSpaceRatio *float64 `toml:"low-space-ratio,omitempty" json:"low-space-ratio,omitempty"`
+ // HighSpaceRatio is the highest usage ratio of a store that is regarded as high space.
+ // High space means there is a lot of spare capacity, and store region score varies directly with used size.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ HighSpaceRatio *float64 `toml:"high-space-ratio,omitempty" json:"high-space-ratio,omitempty"`
+ // DisableLearner is the option to disable using AddLearnerNode instead of AddNode
+ // Immutable, change should be made through pd-ctl after cluster creation
+ DisableLearner *bool `toml:"disable-raft-learner,omitempty" json:"disable-raft-learner,string,omitempty"`
+
+ // DisableRemoveDownReplica is the option to prevent replica checker from
+ // removing down replicas.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ DisableRemoveDownReplica *bool `toml:"disable-remove-down-replica,omitempty" json:"disable-remove-down-replica,string,omitempty"`
+ // DisableReplaceOfflineReplica is the option to prevent replica checker from
+ // replacing offline replicas.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ DisableReplaceOfflineReplica *bool `toml:"disable-replace-offline-replica,omitempty" json:"disable-replace-offline-replica,string,omitempty"`
+ // DisableMakeUpReplica is the option to prevent replica checker from making up
+ // replicas when replica count is less than expected.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ DisableMakeUpReplica *bool `toml:"disable-make-up-replica,omitempty" json:"disable-make-up-replica,string,omitempty"`
+ // DisableRemoveExtraReplica is the option to prevent replica checker from
+ // removing extra replicas.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ DisableRemoveExtraReplica *bool `toml:"disable-remove-extra-replica,omitempty" json:"disable-remove-extra-replica,string,omitempty"`
+ // DisableLocationReplacement is the option to prevent replica checker from
+ // moving replica to a better location.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ DisableLocationReplacement *bool `toml:"disable-location-replacement,omitempty" json:"disable-location-replacement,string,omitempty"`
+ // DisableNamespaceRelocation is the option to prevent namespace checker
+ // from moving replica to the target namespace.
+ // Immutable, change should be made through pd-ctl after cluster creation
+ DisableNamespaceRelocation *bool `toml:"disable-namespace-relocation,omitempty" json:"disable-namespace-relocation,string,omitempty"`
+
+ // Schedulers supports loading customized schedulers
+ // Immutable, change should be made through pd-ctl after cluster creation
+ Schedulers *PDSchedulerConfigs `toml:"schedulers,omitempty" json:"schedulers-v2,omitempty"` // json v2 is for the sake of compatible upgrade
+
+ // Only used to display
+ SchedulersPayload map[string]string `toml:"schedulers-payload" json:"schedulers-payload,omitempty"`
+
+ // EnableOneWayMerge is the option to enable one-way merge. This means a Region can only be merged into the Region next to it.
+ // Imported from v3.1.0
+ EnableOneWayMerge *bool `toml:"enable-one-way-merge" json:"enable-one-way-merge,string,omitempty"`
+ // EnableCrossTableMerge is the option to enable cross-table merge. This means two Regions with different table IDs can be merged.
+ // This option only works when key type is "table".
+ // Imported from v3.1.0
+ EnableCrossTableMerge *bool `toml:"enable-cross-table-merge" json:"enable-cross-table-merge,string,omitempty"`
+}
+
+type PDSchedulerConfigs []PDSchedulerConfig
+
+// PDSchedulerConfig is customized scheduler configuration
+// +k8s:openapi-gen=true
+type PDSchedulerConfig struct {
+ // Immutable, change should be made through pd-ctl after cluster creation
+ Type string `toml:"type,omitempty" json:"type,omitempty"`
+ // Immutable, change should be made through pd-ctl after cluster creation
+ Args []string `toml:"args,omitempty" json:"args,omitempty"`
+ // Immutable, change should be made through pd-ctl after cluster creation
+ Disable *bool `toml:"disable,omitempty" json:"disable,omitempty"`
+}
+
+// PDStoreLabel is the config item of LabelPropertyConfig.
+// +k8s:openapi-gen=true
+type PDStoreLabel struct {
+ Key string `toml:"key,omitempty" json:"key,omitempty"`
+ Value string `toml:"value,omitempty" json:"value,omitempty"`
+}
+
+type PDStoreLabels []PDStoreLabel
+
+type PDLabelPropertyConfig map[string]PDStoreLabels
+
+// +k8s:openapi-gen=true
+type FileLogConfig struct {
+ // Log filename, leave empty to disable file log.
+ Filename string `toml:"filename,omitempty" json:"filename,omitempty"`
+ // Is log rotate enabled.
+ LogRotate bool `toml:"log-rotate,omitempty" json:"log-rotate,omitempty"`
+ // Max size for a single file, in MB.
+ MaxSize int `toml:"max-size,omitempty" json:"max-size,omitempty"`
+ // Max number of days to keep old log files; by default old files are never deleted.
+ MaxDays int `toml:"max-days,omitempty" json:"max-days,omitempty"`
+ // Maximum number of old log files to retain.
+ MaxBackups int `toml:"max-backups,omitempty" json:"max-backups,omitempty"`
+}
+
+// StringSlice is a string slice that encodes to and decodes from JSON as a single comma-separated string
+type StringSlice []string
+
+// MarshalJSON returns the size as a JSON string.
+func (s StringSlice) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.Join(s, ","))), nil
+}
+
+// UnmarshalJSON parses a comma-joined JSON string back into the slice.
+func (s *StringSlice) UnmarshalJSON(text []byte) error {
+ data, err := strconv.Unquote(string(text))
+ if err != nil {
+ return err
+ }
+ if len(data) == 0 {
+ *s = nil
+ return nil
+ }
+ *s = strings.Split(data, ",")
+ return nil
+}
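
Because the StringSlice marshaling above is easy to misread, here is a quick round-trip sketch showing that the slice travels through JSON as one comma-joined string rather than a JSON array (a standalone copy of the same logic, for illustration only):

package main

import (
    "encoding/json"
    "fmt"
    "strconv"
    "strings"
)

// StringSlice mirrors the pdapi type above: it round-trips through JSON as a
// single comma-joined string, e.g. {"zone", "rack"} <-> "zone,rack".
type StringSlice []string

func (s StringSlice) MarshalJSON() ([]byte, error) {
    return []byte(strconv.Quote(strings.Join(s, ","))), nil
}

func (s *StringSlice) UnmarshalJSON(text []byte) error {
    data, err := strconv.Unquote(string(text))
    if err != nil {
        return err
    }
    if len(data) == 0 {
        *s = nil
        return nil
    }
    *s = strings.Split(data, ",")
    return nil
}

func main() {
    out, _ := json.Marshal(StringSlice{"zone", "rack"})
    fmt.Println(string(out)) // "zone,rack"

    var in StringSlice
    _ = json.Unmarshal([]byte(`"zone,rack"`), &in)
    fmt.Println(in) // [zone rack]
}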
diff --git a/pkg/pdapi/pdapi.go b/pkg/pdapi/pdapi.go
index 348b978f90..ab9bdb32c8 100644
--- a/pkg/pdapi/pdapi.go
+++ b/pkg/pdapi/pdapi.go
@@ -25,14 +25,15 @@ import (
"sync"
"time"
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
- glog "k8s.io/klog"
+ "github.com/pingcap/tidb-operator/pkg/util"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/klog"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/pingcap/pd/pkg/typeutil"
"github.com/pingcap/tidb-operator/pkg/httputil"
- certutil "github.com/pingcap/tidb-operator/pkg/util/crypto"
types "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
@@ -65,27 +66,23 @@ func NewDefaultPDControl(kubeCli kubernetes.Interface) PDControlInterface {
// GetTLSConfig returns *tls.Config for given TiDB cluster.
// It loads in-cluster root ca if caCert is empty.
func GetTLSConfig(kubeCli kubernetes.Interface, namespace Namespace, tcName string, caCert []byte) (*tls.Config, error) {
- secretName := fmt.Sprintf("%s-pd-client", tcName)
+ secretName := util.ClusterClientTLSSecretName(tcName)
secret, err := kubeCli.CoreV1().Secrets(string(namespace)).Get(secretName, types.GetOptions{})
if err != nil {
return nil, fmt.Errorf("unable to load certificates from secret %s/%s: %v", namespace, secretName, err)
}
- var rootCAs *x509.CertPool
+ rootCAs := x509.NewCertPool()
var tlsCert tls.Certificate
if len(caCert) > 0 {
- rootCAs = x509.NewCertPool()
rootCAs.AppendCertsFromPEM(caCert)
} else {
- rootCAs, err = certutil.ReadCACerts()
- if err != nil {
- return nil, err
- }
+ rootCAs.AppendCertsFromPEM(secret.Data[v1.ServiceAccountRootCAKey])
}
- clientCert, certExists := secret.Data["cert"]
- clientKey, keyExists := secret.Data["key"]
+ clientCert, certExists := secret.Data[v1.TLSCertKey]
+ clientKey, keyExists := secret.Data[v1.TLSPrivateKeyKey]
if !certExists || !keyExists {
return nil, fmt.Errorf("cert or key does not exist in secret %s/%s", namespace, secretName)
}
@@ -118,7 +115,7 @@ func (pdc *defaultPDControl) GetPDClient(namespace Namespace, tcName string, tls
if tlsEnabled {
tlsConfig, err = GetTLSConfig(pdc.kubeCli, namespace, tcName, nil)
if err != nil {
- glog.Errorf("Unable to get tls config for tidb cluster %q, pd client may not work: %v", tcName, err)
+ klog.Errorf("Unable to get tls config for tidb cluster %q, pd client may not work: %v", tcName, err)
return &pdClient{url: PdClientURL(namespace, tcName, scheme), httpClient: &http.Client{Timeout: DefaultTimeout}}
}
}
@@ -142,7 +139,7 @@ type PDClient interface {
// GetHealth returns the PD's health info
GetHealth() (*HealthInfo, error)
// GetConfig returns PD's config
- GetConfig() (*v1alpha1.PDConfig, error)
+ GetConfig() (*PDConfigFromAPI, error)
// GetCluster returns used when syncing pod labels.
GetCluster() (*metapb.Cluster, error)
// GetMembers returns all PD members from cluster
@@ -158,6 +155,8 @@ type PDClient interface {
SetStoreLabels(storeID uint64, labels map[string]string) (bool, error)
// DeleteStore deletes a TiKV store from cluster
DeleteStore(storeID uint64) error
+ // SetStoreState sets store to specified state.
+ SetStoreState(storeID uint64, state string) error
// DeleteMember deletes a PD member from cluster
DeleteMember(name string) error
// DeleteMemberByID deletes a PD member from cluster
@@ -284,13 +283,13 @@ func (pc *pdClient) GetHealth() (*HealthInfo, error) {
}, nil
}
-func (pc *pdClient) GetConfig() (*v1alpha1.PDConfig, error) {
+func (pc *pdClient) GetConfig() (*PDConfigFromAPI, error) {
apiURL := fmt.Sprintf("%s/%s", pc.url, configPrefix)
body, err := httputil.GetBodyOK(pc.httpClient, apiURL)
if err != nil {
return nil, err
}
- config := &v1alpha1.PDConfig{}
+ config := &PDConfigFromAPI{}
err = json.Unmarshal(body, config)
if err != nil {
return nil, err
@@ -406,6 +405,30 @@ func (pc *pdClient) DeleteStore(storeID uint64) error {
return fmt.Errorf("failed to delete store %d: %v", storeID, string(body))
}
+// SetStoreState sets store to specified state.
+func (pc *pdClient) SetStoreState(storeID uint64, state string) error {
+ apiURL := fmt.Sprintf("%s/%s/%d/state?state=%s", pc.url, storePrefix, storeID, state)
+ req, err := http.NewRequest("POST", apiURL, nil)
+ if err != nil {
+ return err
+ }
+ res, err := pc.httpClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer httputil.DeferClose(res.Body)
+
+ if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound {
+ return nil
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return err
+ }
+
+ return fmt.Errorf("failed to delete store %d: %v", storeID, string(body))
+}
+
func (pc *pdClient) DeleteMemberByID(memberID uint64) error {
var exist bool
members, err := pc.GetMembers()
@@ -541,10 +564,10 @@ func (pc *pdClient) EndEvictLeader(storeID uint64) error {
return nil
}
if res.StatusCode == http.StatusOK {
- glog.Infof("call DELETE method: %s success", apiURL)
+ klog.Infof("call DELETE method: %s success", apiURL)
} else {
err2 := httputil.ReadErrorBody(res.Body)
- glog.Errorf("call DELETE method: %s failed,statusCode: %v,error: %v", apiURL, res.StatusCode, err2)
+ klog.Errorf("call DELETE method: %s failed,statusCode: %v,error: %v", apiURL, res.StatusCode, err2)
}
// pd will return an error with the body contains "scheduler not found" if the scheduler is not found
@@ -669,6 +692,7 @@ const (
GetTombStoneStoresActionType ActionType = "GetTombStoneStores"
GetStoreActionType ActionType = "GetStore"
DeleteStoreActionType ActionType = "DeleteStore"
+ SetStoreStateActionType ActionType = "SetStoreState"
DeleteMemberByIDActionType ActionType = "DeleteMemberByID"
DeleteMemberActionType ActionType = "DeleteMember "
SetStoreLabelsActionType ActionType = "SetStoreLabels"
@@ -728,13 +752,13 @@ func (pc *FakePDClient) GetHealth() (*HealthInfo, error) {
return result.(*HealthInfo), nil
}
-func (pc *FakePDClient) GetConfig() (*v1alpha1.PDConfig, error) {
+func (pc *FakePDClient) GetConfig() (*PDConfigFromAPI, error) {
action := &Action{}
result, err := pc.fakeAPI(GetConfigActionType, action)
if err != nil {
return nil, err
}
- return result.(*v1alpha1.PDConfig), nil
+ return result.(*PDConfigFromAPI), nil
}
func (pc *FakePDClient) GetCluster() (*metapb.Cluster, error) {
@@ -793,6 +817,15 @@ func (pc *FakePDClient) DeleteStore(id uint64) error {
return nil
}
+func (pc *FakePDClient) SetStoreState(id uint64, state string) error {
+ if reaction, ok := pc.reactions[SetStoreStateActionType]; ok {
+ action := &Action{ID: id}
+ _, err := reaction(action)
+ return err
+ }
+ return nil
+}
+
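// Illustrative sketch (not part of this patch): stubbing SetStoreState in tests via
// the fake client's reaction mechanism. AddReaction and the Reaction signature are
// assumed to match the existing helpers that fill the reactions map used above.
func exampleFakeSetStoreState(pc *FakePDClient) {
	pc.AddReaction(SetStoreStateActionType, func(action *Action) (interface{}, error) {
		// pretend PD accepted the state change for any store ID
		return nil, nil
	})
}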
func (pc *FakePDClient) DeleteMemberByID(id uint64) error {
if reaction, ok := pc.reactions[DeleteMemberByIDActionType]; ok {
action := &Action{ID: id}
diff --git a/pkg/pdapi/pdapi_test.go b/pkg/pdapi/pdapi_test.go
index 9b96be0115..d8da8d3c38 100644
--- a/pkg/pdapi/pdapi_test.go
+++ b/pkg/pdapi/pdapi_test.go
@@ -26,7 +26,6 @@ import (
. "github.com/onsi/gomega"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
- "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)
const (
@@ -81,8 +80,8 @@ func TestHealth(t *testing.T) {
func TestGetConfig(t *testing.T) {
g := NewGomegaWithT(t)
- config := &v1alpha1.PDConfig{
- Schedule: &v1alpha1.PDScheduleConfig{
+ config := &PDConfigFromAPI{
+ Schedule: &PDScheduleConfig{
MaxStoreDownTime: "10s",
},
}
@@ -94,7 +93,7 @@ func TestGetConfig(t *testing.T) {
path string
method string
resp []byte
- want *v1alpha1.PDConfig
+ want *PDConfigFromAPI
}{{
caseName: "GetConfig",
path: fmt.Sprintf("/%s", configPrefix),
diff --git a/pkg/scheduler/predicates/ha.go b/pkg/scheduler/predicates/ha.go
index 7d52c300f9..4954466453 100644
--- a/pkg/scheduler/predicates/ha.go
+++ b/pkg/scheduler/predicates/ha.go
@@ -32,7 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
type ha struct {
@@ -87,7 +87,7 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
tcName := getTCNameFromPod(pod, component)
if component != label.PDLabelVal && component != label.TiKVLabelVal {
- glog.V(4).Infof("component %s is ignored in HA predicate", component)
+ klog.V(4).Infof("component %s is ignored in HA predicate", component)
return nodes, nil
}
@@ -118,7 +118,7 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
return nil, err
}
replicas := getReplicasFrom(tc, component)
- glog.Infof("ha: tidbcluster %s/%s component %s replicas %d", ns, tcName, component, replicas)
+ klog.Infof("ha: tidbcluster %s/%s component %s replicas %d", ns, tcName, component, replicas)
allNodes := make(sets.String)
nodeMap := make(map[string][]string)
@@ -137,7 +137,7 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
nodeMap[nodeName] = append(nodeMap[nodeName], pName)
}
- glog.V(4).Infof("nodeMap: %+v", nodeMap)
+ klog.V(4).Infof("nodeMap: %+v", nodeMap)
min := -1
minNodeNames := make([]string, 0)
@@ -187,13 +187,13 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
// tikv replicas less than 3 cannot achieve high availability
if component == label.TiKVLabelVal && replicas < 3 {
minNodeNames = append(minNodeNames, nodeName)
- glog.Infof("replicas is %d, add node %s to minNodeNames", replicas, nodeName)
+ klog.Infof("replicas is %d, add node %s to minNodeNames", replicas, nodeName)
continue
}
if podsCount+1 > maxPodsPerNode {
// pods on this node exceeds the limit, skip
- glog.Infof("node %s has %d instances of component %s, max allowed is %d, skipping",
+ klog.Infof("node %s has %d instances of component %s, max allowed is %d, skipping",
nodeName, podsCount, component, maxPodsPerNode)
continue
}
@@ -203,7 +203,7 @@ func (h *ha) Filter(instanceName string, pod *apiv1.Pod, nodes []apiv1.Node) ([]
min = podsCount
}
if podsCount > min {
- glog.Infof("node %s podsCount %d > min %d, skipping", nodeName, podsCount, min)
+ klog.Infof("node %s podsCount %d > min %d, skipping", nodeName, podsCount, min)
continue
}
if podsCount < min {
@@ -282,11 +282,11 @@ func (h *ha) realAcquireLock(pod *apiv1.Pod) (*apiv1.PersistentVolumeClaim, *api
delete(schedulingPVC.Annotations, label.AnnPVCPodScheduling)
err = h.updatePVCFn(schedulingPVC)
if err != nil {
- glog.Errorf("ha: failed to delete pvc %s/%s annotation %s, %v",
+ klog.Errorf("ha: failed to delete pvc %s/%s annotation %s, %v",
ns, schedulingPVC.GetName(), label.AnnPVCPodScheduling, err)
return schedulingPVC, currentPVC, err
}
- glog.Infof("ha: delete pvc %s/%s annotation %s successfully",
+ klog.Infof("ha: delete pvc %s/%s annotation %s successfully",
ns, schedulingPVC.GetName(), label.AnnPVCPodScheduling)
return schedulingPVC, currentPVC, h.setCurrentPodScheduling(currentPVC)
}
@@ -319,10 +319,10 @@ func (h *ha) realUpdatePVCFn(pvc *apiv1.PersistentVolumeClaim) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, updateErr := h.kubeCli.CoreV1().PersistentVolumeClaims(ns).Update(pvc)
if updateErr == nil {
- glog.Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
+ klog.Infof("update PVC: [%s/%s] successfully, TidbCluster: %s", ns, pvcName, tcName)
return nil
}
- glog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr)
+ klog.Errorf("failed to update PVC: [%s/%s], TidbCluster: %s, error: %v", ns, pvcName, tcName, updateErr)
if updated, err := h.pvcGetFn(ns, pvcName); err == nil {
// make a copy so we don't mutate the shared cache
@@ -355,11 +355,11 @@ func (h *ha) setCurrentPodScheduling(pvc *apiv1.PersistentVolumeClaim) error {
pvc.Annotations[label.AnnPVCPodScheduling] = now
err := h.updatePVCFn(pvc)
if err != nil {
- glog.Errorf("ha: failed to set pvc %s/%s annotation %s to %s, %v",
+ klog.Errorf("ha: failed to set pvc %s/%s annotation %s to %s, %v",
ns, pvcName, label.AnnPVCPodScheduling, now, err)
return err
}
- glog.Infof("ha: set pvc %s/%s annotation %s to %s successfully",
+ klog.Infof("ha: set pvc %s/%s annotation %s to %s successfully",
ns, pvcName, label.AnnPVCPodScheduling, now)
return nil
}
diff --git a/pkg/scheduler/predicates/stable_scheduling.go b/pkg/scheduler/predicates/stable_scheduling.go
index 1ab7bea2ac..0c1bdd0706 100644
--- a/pkg/scheduler/predicates/stable_scheduling.go
+++ b/pkg/scheduler/predicates/stable_scheduling.go
@@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -93,10 +93,10 @@ func (p *stableScheduling) Filter(instanceName string, pod *apiv1.Pod, nodes []a
nodeName := p.findPreviousNodeInTC(tc, pod)
if nodeName != "" {
- glog.V(2).Infof("found previous node %q for pod %q in TiDB cluster %q", nodeName, podName, tcName)
+ klog.V(2).Infof("found previous node %q for pod %q in TiDB cluster %q", nodeName, podName, tcName)
for _, node := range nodes {
if node.Name == nodeName {
- glog.V(2).Infof("previous node %q for pod %q in TiDB cluster %q exists in candicates, filter out other nodes", nodeName, podName, tcName)
+ klog.V(2).Infof("previous node %q for pod %q in TiDB cluster %q exists in candicates, filter out other nodes", nodeName, podName, tcName)
return []apiv1.Node{node}, nil
}
}
@@ -104,7 +104,7 @@ func (p *stableScheduling) Filter(instanceName string, pod *apiv1.Pod, nodes []a
}
msg := fmt.Sprintf("no previous node exists for pod %q in TiDB cluster %s/%s", podName, ns, tcName)
- glog.Warning(msg)
+ klog.Warning(msg)
return nodes, errors.New(msg)
}
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 6a03670057..6a5f35a33a 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -25,7 +25,7 @@ import (
kubescheme "k8s.io/client-go/kubernetes/scheme"
eventv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
- glog "k8s.io/klog"
+ "k8s.io/klog"
schedulerapiv1 "k8s.io/kubernetes/pkg/scheduler/api/v1"
)
@@ -53,7 +53,7 @@ type scheduler struct {
// NewScheduler returns a Scheduler
func NewScheduler(kubeCli kubernetes.Interface, cli versioned.Interface) Scheduler {
eventBroadcaster := record.NewBroadcaster()
- eventBroadcaster.StartLogging(glog.Infof)
+ eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&eventv1.EventSinkImpl{
Interface: eventv1.New(kubeCli.CoreV1().RESTClient()).Events("")})
recorder := eventBroadcaster.NewRecorder(kubescheme.Scheme, apiv1.EventSource{Component: "tidb-scheduler"})
@@ -93,7 +93,7 @@ func (s *scheduler) Filter(args *schedulerapiv1.ExtenderArgs) (*schedulerapiv1.E
var instanceName string
var exist bool
if instanceName, exist = pod.Labels[label.InstanceLabelKey]; !exist {
- glog.Warningf("can't find instanceName in pod labels: %s/%s", ns, podName)
+ klog.Warningf("can't find instanceName in pod labels: %s/%s", ns, podName)
return &schedulerapiv1.ExtenderFilterResult{
Nodes: args.Nodes,
}, nil
@@ -101,7 +101,7 @@ func (s *scheduler) Filter(args *schedulerapiv1.ExtenderArgs) (*schedulerapiv1.E
component, ok := pod.Labels[label.ComponentLabelKey]
if !ok {
- glog.Warningf("can't find component label in pod labels: %s/%s", ns, podName)
+ klog.Warningf("can't find component label in pod labels: %s/%s", ns, podName)
return &schedulerapiv1.ExtenderFilterResult{
Nodes: args.Nodes,
}, nil
@@ -109,18 +109,18 @@ func (s *scheduler) Filter(args *schedulerapiv1.ExtenderArgs) (*schedulerapiv1.E
predicatesByComponent, ok := s.predicates[component]
if !ok {
- glog.Warningf("no predicate for component %q, ignored", component)
+ klog.Warningf("no predicate for component %q, ignored", component)
return &schedulerapiv1.ExtenderFilterResult{
Nodes: args.Nodes,
}, nil
}
- glog.Infof("scheduling pod: %s/%s", ns, podName)
+ klog.Infof("scheduling pod: %s/%s", ns, podName)
var err error
for _, predicate := range predicatesByComponent {
- glog.Infof("entering predicate: %s, nodes: %v", predicate.Name(), predicates.GetNodeNames(kubeNodes))
+ klog.Infof("entering predicate: %s, nodes: %v", predicate.Name(), predicates.GetNodeNames(kubeNodes))
kubeNodes, err = predicate.Filter(instanceName, pod, kubeNodes)
- glog.Infof("leaving predicate: %s, nodes: %v", predicate.Name(), predicates.GetNodeNames(kubeNodes))
+ klog.Infof("leaving predicate: %s, nodes: %v", predicate.Name(), predicates.GetNodeNames(kubeNodes))
if err != nil {
s.recorder.Event(pod, apiv1.EventTypeWarning, predicate.Name(), err.Error())
if len(kubeNodes) == 0 {
diff --git a/pkg/scheduler/server/mux.go b/pkg/scheduler/server/mux.go
index 9061ce54dc..85ab686f8e 100644
--- a/pkg/scheduler/server/mux.go
+++ b/pkg/scheduler/server/mux.go
@@ -22,7 +22,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
"github.com/pingcap/tidb-operator/pkg/scheduler"
"k8s.io/client-go/kubernetes"
- glog "k8s.io/klog"
+ "k8s.io/klog"
schedulerapiv1 "k8s.io/kubernetes/pkg/scheduler/api/v1"
)
@@ -58,8 +58,8 @@ func StartServer(kubeCli kubernetes.Interface, cli versioned.Interface, port int
Writes(schedulerapiv1.HostPriorityList{}))
restful.Add(ws)
- glog.Infof("start scheduler extender server, listening on 0.0.0.0:%d", port)
- glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
+ klog.Infof("start scheduler extender server, listening on 0.0.0.0:%d", port)
+ klog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
}
func (svr *server) filterNode(req *restful.Request, resp *restful.Response) {
@@ -104,8 +104,8 @@ func (svr *server) prioritizeNode(req *restful.Request, resp *restful.Response)
}
func errorResponse(resp *restful.Response, svcErr restful.ServiceError) {
- glog.Error(svcErr.Message)
+ klog.Error(svcErr.Message)
if writeErr := resp.WriteServiceError(svcErr.Code, svcErr); writeErr != nil {
- glog.Errorf("unable to write error: %v", writeErr)
+ klog.Errorf("unable to write error: %v", writeErr)
}
}
diff --git a/pkg/tkctl/cmd/list/list.go b/pkg/tkctl/cmd/list/list.go
index d115b7ff6b..42fa1d2394 100644
--- a/pkg/tkctl/cmd/list/list.go
+++ b/pkg/tkctl/cmd/list/list.go
@@ -19,7 +19,7 @@ import (
"github.com/pingcap/tidb-operator/pkg/tkctl/readable"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
- glog "k8s.io/klog"
+ "k8s.io/klog"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
kubeprinters "k8s.io/kubernetes/pkg/printers"
)
@@ -129,7 +129,7 @@ func (o *ListOptions) Run(tkcContext *config.TkcContext, cmd *cobra.Command, arg
for _, info := range infos {
internalObj, err := v1alpha1.Scheme.ConvertToVersion(info.Object, v1alpha1.SchemeGroupVersion)
if err != nil {
- glog.V(1).Info(err)
+ klog.V(1).Info(err)
printer.PrintObj(info.Object, w)
} else {
printer.PrintObj(internalObj, w)
diff --git a/pkg/tkctl/config/config.go b/pkg/tkctl/config/config.go
index c2079e927d..5a06e7bcee 100644
--- a/pkg/tkctl/config/config.go
+++ b/pkg/tkctl/config/config.go
@@ -25,7 +25,7 @@ import (
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
- glog "k8s.io/klog"
+ "k8s.io/klog"
restclient "k8s.io/client-go/rest"
)
@@ -98,11 +98,11 @@ func (c *TkcContext) ToTkcClientConfig() (*TkcClientConfig, error) {
// try loading tidb cluster config
tcConfigFile, err := tcConfigLocation()
if err != nil {
- glog.V(4).Info("Error getting tidb cluster config file location")
+ klog.V(4).Info("Error getting tidb cluster config file location")
} else {
tcConfig, err := LoadFile(tcConfigFile)
if err != nil {
- glog.V(4).Info("Error reading tidb cluster config file")
+ klog.V(4).Info("Error reading tidb cluster config file")
c.TidbClusterConfig = &TidbClusterConfig{}
} else {
c.TidbClusterConfig = tcConfig
diff --git a/pkg/upgrader/upgrader.go b/pkg/upgrader/upgrader.go
index 6ad8729126..89ea06e697 100644
--- a/pkg/upgrader/upgrader.go
+++ b/pkg/upgrader/upgrader.go
@@ -23,11 +23,11 @@ import (
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
"github.com/pingcap/tidb-operator/pkg/features"
"github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
utildiscovery "github.com/pingcap/tidb-operator/pkg/util/discovery"
appsv1 "k8s.io/api/apps/v1"
- "k8s.io/apimachinery/pkg/api/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
@@ -56,49 +56,41 @@ type upgrader struct {
var _ Interface = &upgrader{}
-// isOwnedByTidbCluster checks if the given object is owned by TidbCluster.
-// Schema Kind and Group are checked, Version is ignored.
-func isOwnedByTidbCluster(obj metav1.Object) bool {
- ref := metav1.GetControllerOf(obj)
- if ref == nil {
- return false
- }
- gv, err := schema.ParseGroupVersion(ref.APIVersion)
- if err != nil {
- return false
- }
- return ref.Kind == v1alpha1.TiDBClusterKind && gv.Group == v1alpha1.SchemeGroupVersion.Group
-}
-
func (u *upgrader) Upgrade() error {
if features.DefaultFeatureGate.Enabled(features.AdvancedStatefulSet) {
klog.Infof("Upgrader: migrating Kubernetes StatefulSets to Advanced StatefulSets")
- tcList, err := u.cli.PingcapV1alpha1().TidbClusters(u.ns).List(metav1.ListOptions{})
- if err != nil {
- return err
- }
- for _, tc := range tcList.Items {
- // Existing delete slots annotations must be removed first. This is
- // a safety check to ensure no pods are affected in upgrading
- // process.
- if anns := deleteSlotAnns(&tc); len(anns) > 0 {
- return fmt.Errorf("Upgrader: TidbCluster %s/%s has delete slot annotations %v, please remove them before enabling AdvancedStatefulSet feature", tc.Namespace, tc.Name, anns)
- }
- }
stsList, err := u.kubeCli.AppsV1().StatefulSets(u.ns).List(metav1.ListOptions{})
if err != nil {
return err
}
stsToMigrate := make([]appsv1.StatefulSet, 0)
+ tidbClusters := make([]*v1alpha1.TidbCluster, 0)
for _, sts := range stsList.Items {
- if isOwnedByTidbCluster(&sts) {
+ if ok, tcRef := util.IsOwnedByTidbCluster(&sts); ok {
stsToMigrate = append(stsToMigrate, sts)
+ tc, err := u.cli.PingcapV1alpha1().TidbClusters(sts.Namespace).Get(tcRef.Name, metav1.GetOptions{})
+ if err != nil && !apierrors.IsNotFound(err) {
+ return err
+ }
+ if tc != nil {
+ tidbClusters = append(tidbClusters, tc)
+ }
}
}
if len(stsToMigrate) <= 0 {
klog.Infof("Upgrader: found 0 Kubernetes StatefulSets owned by TidbCluster, nothing need to do")
return nil
}
+ klog.Infof("Upgrader: %d Kubernetes Statfulsets owned by TidbCluster should be migrated to Advanced Statefulsets", len(stsToMigrate))
+ // Check if relavant TidbClusters have delete slots annotations set.
+ for _, tc := range tidbClusters {
+ // Existing delete slots annotations must be removed first. This is
+ // a safety check to ensure no pods are affected in upgrading
+ // process.
+ if anns := deleteSlotAnns(tc); len(anns) > 0 {
+ return fmt.Errorf("Upgrader: TidbCluster %s/%s has delete slot annotations %v, please remove them before enabling AdvancedStatefulSet feature", tc.Namespace, tc.Name, anns)
+ }
+ }
klog.Infof("Upgrader: found %d Kubernetes StatefulSets owned by TidbCluster, trying to migrate one by one", len(stsToMigrate))
for _, sts := range stsToMigrate {
_, err := helper.Upgrade(u.kubeCli, u.asCli, &sts)
@@ -116,7 +108,7 @@ func (u *upgrader) Upgrade() error {
}
stsList, err := u.asCli.AppsV1().StatefulSets(u.ns).List(metav1.ListOptions{})
if err != nil {
- if errors.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
klog.Infof("Upgrader: Kubernetes server does't have Advanced StatefulSets resources, skip to revert")
return nil
}
@@ -124,7 +116,7 @@ func (u *upgrader) Upgrade() error {
}
stsToMigrate := make([]asappsv1.StatefulSet, 0)
for _, sts := range stsList.Items {
- if isOwnedByTidbCluster(&sts) {
+ if ok, _ := util.IsOwnedByTidbCluster(&sts); ok {
stsToMigrate = append(stsToMigrate, sts)
}
}
diff --git a/pkg/upgrader/upgrader_test.go b/pkg/upgrader/upgrader_test.go
index 0861328c44..aedf4e4830 100644
--- a/pkg/upgrader/upgrader_test.go
+++ b/pkg/upgrader/upgrader_test.go
@@ -25,6 +25,7 @@ import (
versionedfake "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned/fake"
"github.com/pingcap/tidb-operator/pkg/features"
"github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/util"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
@@ -86,7 +87,7 @@ func TestIsOwnedByTidbCluster(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- ok := isOwnedByTidbCluster(&tt.sts)
+ ok, _ := util.IsOwnedByTidbCluster(&tt.sts)
if tt.wantOK != ok {
t.Errorf("got %v, want %v", ok, tt.wantOK)
}
@@ -160,10 +161,12 @@ func TestDeleteSlotAnns(t *testing.T) {
}
var (
+ ownerTCName = "foo"
validOwnerRefs = []metav1.OwnerReference{
{
APIVersion: "pingcap.com/v1alpha1",
Kind: "TidbCluster",
+ Name: ownerTCName,
Controller: pointer.BoolPtr(true),
},
}
@@ -319,6 +322,8 @@ func TestUpgrade(t *testing.T) {
tidbClusters: []v1alpha1.TidbCluster{
{
ObjectMeta: metav1.ObjectMeta{
+ Name: ownerTCName,
+ Namespace: "sts",
Annotations: map[string]string{
label.AnnTiDBDeleteSlots: "[1,2]",
},
@@ -378,6 +383,78 @@ func TestUpgrade(t *testing.T) {
},
},
},
+ {
+ name: "should upgrade if tc has delete slot annotations but does not own Kubernetes StatefulSets",
+ tidbClusters: []v1alpha1.TidbCluster{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: ownerTCName,
+ Namespace: "sts",
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "bar",
+ Namespace: "sts",
+ Annotations: map[string]string{
+ label.AnnTiDBDeleteSlots: "[1,2]",
+ },
+ },
+ },
+ },
+ statefulsets: []appsv1.StatefulSet{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "StatefulSet",
+ APIVersion: "apps/v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sts1",
+ Namespace: "sts",
+ OwnerReferences: validOwnerRefs,
+ },
+ },
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "StatefulSet",
+ APIVersion: "apps/v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sts2",
+ Namespace: "sts",
+ OwnerReferences: validOwnerRefs,
+ },
+ },
+ },
+ feature: "AdvancedStatefulSet=true",
+ ns: metav1.NamespaceAll,
+ wantErr: false,
+ wantAdvancedStatefulsets: []asappsv1.StatefulSet{
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "StatefulSet",
+ APIVersion: "apps.pingcap.com/v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sts1",
+ Namespace: "sts",
+ OwnerReferences: validOwnerRefs,
+ },
+ },
+ {
+ TypeMeta: metav1.TypeMeta{
+ Kind: "StatefulSet",
+ APIVersion: "apps.pingcap.com/v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sts2",
+ Namespace: "sts",
+ OwnerReferences: validOwnerRefs,
+ },
+ },
+ },
+ wantStatefulsets: nil,
+ },
{
name: "should ignore if sts is not owned by TidbCluster",
tidbClusters: nil,
diff --git a/pkg/util/crdutil.go b/pkg/util/crdutil.go
index 0b5d884a30..6d64c624fc 100644
--- a/pkg/util/crdutil.go
+++ b/pkg/util/crdutil.go
@@ -28,7 +28,7 @@ var (
Name: "PD",
Type: "string",
Description: "The image for PD cluster",
- JSONPath: ".spec.pd.image",
+ JSONPath: ".status.pd.image",
}
tidbClusterPDStorageColumn = extensionsobj.CustomResourceColumnDefinition{
Name: "Storage",
@@ -52,7 +52,7 @@ var (
Name: "TiKV",
Type: "string",
Description: "The image for TiKV cluster",
- JSONPath: ".spec.tikv.image",
+ JSONPath: ".status.tikv.image",
}
tidbClusterTiKVStorageColumn = extensionsobj.CustomResourceColumnDefinition{
Name: "Storage",
@@ -76,7 +76,7 @@ var (
Name: "TiDB",
Type: "string",
Description: "The image for TiDB cluster",
- JSONPath: ".spec.tidb.image",
+ JSONPath: ".status.tidb.image",
}
tidbClusterTiDBReadyColumn = extensionsobj.CustomResourceColumnDefinition{
Name: "Ready",
@@ -171,17 +171,51 @@ var (
Priority: 1,
JSONPath: ".status.phase",
}
+ autoScalerPrinterColumns []extensionsobj.CustomResourceColumnDefinition
+ // TODO: add the current replica count of the TiKV cluster
+ autoScalerTiKVMaxReplicasColumn = extensionsobj.CustomResourceColumnDefinition{
+ Name: "TiKV-MaxReplicas",
+ Type: "integer",
+ Description: "The maximal replicas of TiKV",
+ JSONPath: ".spec.tikv.maxReplicas",
+ }
+ autoScalerTiKVMinReplicasColumn = extensionsobj.CustomResourceColumnDefinition{
+ Name: "TiKV-MinReplicas",
+ Type: "integer",
+ Description: "The minimal replicas of TiKV",
+ JSONPath: ".spec.tikv.minReplicas",
+ }
+ // TODO: add the current replica count of the TiDB cluster
+ autoScalerTiDBMaxReplicasColumn = extensionsobj.CustomResourceColumnDefinition{
+ Name: "TiDB-MaxReplicas",
+ Type: "integer",
+ Description: "The maximal replicas of TiDB",
+ JSONPath: ".spec.tidb.maxReplicas",
+ }
+ autoScalerTiDBMinReplicasColumn = extensionsobj.CustomResourceColumnDefinition{
+ Name: "TiDB-MinReplicas",
+ Type: "integer",
+ Description: "The minimal replicas of TiDB",
+ JSONPath: ".spec.tidb.minReplicas",
+ }
+ ageColumn = extensionsobj.CustomResourceColumnDefinition{
+ Name: "Age",
+ Type: "date",
+ JSONPath: ".metadata.creationTimestamp",
+ }
)
func init() {
tidbClusteradditionalPrinterColumns = append(tidbClusteradditionalPrinterColumns,
tidbClusterPDColumn, tidbClusterPDStorageColumn, tidbClusterPDReadyColumn, tidbClusterPDDesireColumn,
tidbClusterTiKVColumn, tidbClusterTiKVStorageColumn, tidbClusterTiKVReadyColumn, tidbClusterTiKVDesireColumn,
- tidbClusterTiDBColumn, tidbClusterTiDBReadyColumn, tidbClusterTiDBDesireColumn)
- backupAdditionalPrinterColumns = append(backupAdditionalPrinterColumns, backupPathColumn, backupBackupSizeColumn, backupCommitTSColumn, backupStartedColumn, backupCompletedColumn)
- restoreAdditionalPrinterColumns = append(restoreAdditionalPrinterColumns, restoreStartedColumn, restoreCompletedColumn)
- bksAdditionalPrinterColumns = append(bksAdditionalPrinterColumns, bksScheduleColumn, bksMaxBackups, bksLastBackup, bksLastBackupTime)
- tidbInitializerPrinterColumns = append(tidbInitializerPrinterColumns, tidbInitializerPhase)
+ tidbClusterTiDBColumn, tidbClusterTiDBReadyColumn, tidbClusterTiDBDesireColumn, ageColumn)
+ backupAdditionalPrinterColumns = append(backupAdditionalPrinterColumns, backupPathColumn, backupBackupSizeColumn, backupCommitTSColumn, backupStartedColumn, backupCompletedColumn, ageColumn)
+ restoreAdditionalPrinterColumns = append(restoreAdditionalPrinterColumns, restoreStartedColumn, restoreCompletedColumn, ageColumn)
+ bksAdditionalPrinterColumns = append(bksAdditionalPrinterColumns, bksScheduleColumn, bksMaxBackups, bksLastBackup, bksLastBackupTime, ageColumn)
+ tidbInitializerPrinterColumns = append(tidbInitializerPrinterColumns, tidbInitializerPhase, ageColumn)
+ autoScalerPrinterColumns = append(autoScalerPrinterColumns, autoScalerTiDBMaxReplicasColumn, autoScalerTiDBMinReplicasColumn,
+ autoScalerTiKVMaxReplicasColumn, autoScalerTiKVMinReplicasColumn, ageColumn)
}
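// Illustrative sketch (not part of this patch): with the printer columns registered
// above, `kubectl get` on TidbClusterAutoScaler resources is expected to show
// headers roughly like
//
//	NAME   TIDB-MAXREPLICAS   TIDB-MINREPLICAS   TIKV-MAXREPLICAS   TIKV-MINREPLICAS   AGE
//
// with values read from the listed JSONPaths on each CR.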
func NewCustomResourceDefinition(crdKind v1alpha1.CrdKind, group string, labels map[string]string, validation bool) *extensionsobj.CustomResourceDefinition {
@@ -242,6 +276,8 @@ func addAdditionalPrinterColumnsForCRD(crd *extensionsobj.CustomResourceDefiniti
case v1alpha1.DefaultCrdKinds.TiDBInitializer.Kind:
crd.Spec.AdditionalPrinterColumns = tidbInitializerPrinterColumns
break
+ case v1alpha1.DefaultCrdKinds.TidbClusterAutoScaler.Kind:
+ crd.Spec.AdditionalPrinterColumns = autoScalerPrinterColumns
default:
break
}
diff --git a/pkg/util/crypto/certs.go b/pkg/util/crypto/certs.go
index ba65ac5c91..c149ef5865 100644
--- a/pkg/util/crypto/certs.go
+++ b/pkg/util/crypto/certs.go
@@ -16,15 +16,13 @@ package crypto
import (
"crypto/rand"
"crypto/rsa"
- "crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
- "fmt"
"io/ioutil"
"net"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -97,26 +95,11 @@ func ReadCACerts() (*x509.CertPool, error) {
// load k8s CA cert
caCert, err := ioutil.ReadFile(k8sCAFile)
if err != nil {
- glog.Errorf("fail to read CA file %s, error: %v", k8sCAFile, err)
+ klog.Errorf("fail to read CA file %s, error: %v", k8sCAFile, err)
return nil, err
}
if ok := rootCAs.AppendCertsFromPEM(caCert); !ok {
- glog.Warningf("fail to append CA file to pool, using system CAs only")
+ klog.Warningf("fail to append CA file to pool, using system CAs only")
}
return rootCAs, nil
}
-
-func LoadCerts(cert []byte, key []byte) (*x509.CertPool, tls.Certificate, error) {
- if cert == nil || key == nil {
- return nil, tls.Certificate{}, fmt.Errorf("fail to load certs, cert and key can not be empty")
- }
-
- rootCAs, err := ReadCACerts()
- if err != nil {
- return rootCAs, tls.Certificate{}, err
- }
-
- // load client cert
- tlsCert, err := tls.X509KeyPair(cert, key)
- return rootCAs, tlsCert, err
-}
diff --git a/pkg/util/util.go b/pkg/util/util.go
index 87ffd8450b..f518be3978 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -25,9 +25,16 @@ import (
"github.com/pingcap/tidb-operator/pkg/label"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
)
+var (
+ ClusterClientTLSPath = "/var/lib/cluster-client-tls"
+ TiDBClientTLSPath = "/var/lib/tidb-client-tls"
+)
+
func GetOrdinalFromPodName(podName string) (int32, error) {
ordinalStr := podName[strings.LastIndex(podName, "-")+1:]
ordinalInt, err := strconv.Atoi(ordinalStr)
@@ -126,3 +133,96 @@ func IsStatefulSetScaling(set *appsv1.StatefulSet) bool {
func GetStatefulSetName(tc *v1alpha1.TidbCluster, memberType v1alpha1.MemberType) string {
return fmt.Sprintf("%s-%s", tc.Name, memberType.String())
}
+
+func GetAutoScalingOutSlots(tc *v1alpha1.TidbCluster, memberType v1alpha1.MemberType) sets.Int32 {
+ s := sets.Int32{}
+ l := ""
+ switch memberType {
+ case v1alpha1.PDMemberType:
+ return s
+ case v1alpha1.TiKVMemberType:
+ l = label.AnnTiKVAutoScalingOutOrdinals
+ case v1alpha1.TiDBMemberType:
+ l = label.AnnTiDBAutoScalingOutOrdinals
+ default:
+ return s
+ }
+ if tc.Annotations == nil {
+ return s
+ }
+ v, existed := tc.Annotations[l]
+ if !existed {
+ return s
+ }
+ var slice []int32
+ err := json.Unmarshal([]byte(v), &slice)
+ if err != nil {
+ return s
+ }
+ s.Insert(slice...)
+ return s
+}
+
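// Illustrative sketch (not part of this patch): reading the auto-scaling-out
// ordinals from the TidbCluster annotations. The example function name is
// hypothetical; with the annotation value "[3,4]" the returned set contains
// the ordinals 3 and 4.
func exampleAutoScalingOutSlots() sets.Int32 {
	tc := &v1alpha1.TidbCluster{}
	tc.Annotations = map[string]string{
		// AnnTiKVAutoScalingOutOrdinals holds a JSON-encoded []int32.
		label.AnnTiKVAutoScalingOutOrdinals: "[3,4]",
	}
	return GetAutoScalingOutSlots(tc, v1alpha1.TiKVMemberType) // -> {3, 4}
}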
+func Encode(obj interface{}) (string, error) {
+ b, err := json.Marshal(obj)
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+func ClusterClientTLSSecretName(tcName string) string {
+ return fmt.Sprintf("%s-cluster-client-secret", tcName)
+}
+
+func ClusterTLSSecretName(tcName, component string) string {
+ return fmt.Sprintf("%s-%s-cluster-secret", tcName, component)
+}
+
+func TiDBClientTLSSecretName(tcName string) string {
+ return fmt.Sprintf("%s-tidb-client-secret", tcName)
+}
+
+// SortEnvByName implements sort.Interface to sort env list by name.
+type SortEnvByName []corev1.EnvVar
+
+func (e SortEnvByName) Len() int {
+ return len(e)
+}
+func (e SortEnvByName) Swap(i, j int) {
+ e[i], e[j] = e[j], e[i]
+}
+
+func (e SortEnvByName) Less(i, j int) bool {
+ return e[i].Name < e[j].Name
+}
+
+// AppendEnv appends envs from `b` into `a`, ignoring envs whose names already
+// exist in `a`.
+// Note that this will not change the relative order of envs.
+func AppendEnv(a []corev1.EnvVar, b []corev1.EnvVar) []corev1.EnvVar {
+ aMap := make(map[string]corev1.EnvVar)
+ for _, e := range a {
+ aMap[e.Name] = e
+ }
+ for _, e := range b {
+ if _, ok := aMap[e.Name]; !ok {
+ a = append(a, e)
+ }
+ }
+ return a
+}
+
+// IsOwnedByTidbCluster checks if the given object is owned by TidbCluster.
+// Schema Kind and Group are checked, Version is ignored.
+func IsOwnedByTidbCluster(obj metav1.Object) (bool, *metav1.OwnerReference) {
+ ref := metav1.GetControllerOf(obj)
+ if ref == nil {
+ return false, nil
+ }
+ gv, err := schema.ParseGroupVersion(ref.APIVersion)
+ if err != nil {
+ return false, nil
+ }
+ return ref.Kind == v1alpha1.TiDBClusterKind && gv.Group == v1alpha1.SchemeGroupVersion.Group, ref
+}
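// Illustrative sketch (not part of this patch): looking up the owning TidbCluster
// name from a StatefulSet via the controller OwnerReference that
// IsOwnedByTidbCluster now returns, as the upgrader does above. The example
// function name is hypothetical.
func exampleOwningTidbClusterName(sts *appsv1.StatefulSet) (string, bool) {
	if ok, ref := IsOwnedByTidbCluster(sts); ok {
		return ref.Name, true
	}
	return "", false
}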
diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go
index cccbec683b..a4f9d4e740 100644
--- a/pkg/util/utils_test.go
+++ b/pkg/util/utils_test.go
@@ -16,9 +16,11 @@ package util
import (
"testing"
+ "github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/label"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
@@ -125,3 +127,63 @@ func TestGetPodOrdinals(t *testing.T) {
})
}
}
+
+func TestAppendEnv(t *testing.T) {
+ tests := []struct {
+ name string
+ a []corev1.EnvVar
+ b []corev1.EnvVar
+ want []corev1.EnvVar
+ }{
+ {
+ name: "envs whose names exist are ignored",
+ a: []corev1.EnvVar{
+ {
+ Name: "foo",
+ Value: "bar",
+ },
+ {
+ Name: "xxx",
+ Value: "xxx",
+ },
+ },
+ b: []corev1.EnvVar{
+ {
+ Name: "foo",
+ Value: "barbar",
+ },
+ {
+ Name: "new",
+ Value: "bar",
+ },
+ {
+ Name: "xxx",
+ Value: "yyy",
+ },
+ },
+ want: []corev1.EnvVar{
+ {
+ Name: "foo",
+ Value: "bar",
+ },
+ {
+ Name: "xxx",
+ Value: "xxx",
+ },
+ {
+ Name: "new",
+ Value: "bar",
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := AppendEnv(tt.a, tt.b)
+ if diff := cmp.Diff(tt.want, got); diff != "" {
+ t.Errorf("unwant (-want, +got): %s", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/version/version.go b/pkg/version/version.go
index 4b75fce148..9e431936d8 100644
--- a/pkg/version/version.go
+++ b/pkg/version/version.go
@@ -17,7 +17,7 @@ import (
"fmt"
"runtime"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
var (
@@ -35,8 +35,8 @@ func PrintVersionInfo() {
// LogVersionInfo print version info at startup
func LogVersionInfo() {
- glog.Infof("Welcome to TiDB Operator.")
- glog.Infof("TiDB Operator Version: %#v", Get())
+ klog.Infof("Welcome to TiDB Operator.")
+ klog.Infof("TiDB Operator Version: %#v", Get())
}
// Get returns the overall codebase version. It's for detecting
diff --git a/pkg/webhook/admission_hooks.go b/pkg/webhook/admission_hooks.go
index f40a036f02..33e99722f5 100644
--- a/pkg/webhook/admission_hooks.go
+++ b/pkg/webhook/admission_hooks.go
@@ -96,7 +96,25 @@ func (a *AdmissionHook) MutatingResource() (plural schema.GroupVersionResource,
}
func (a *AdmissionHook) Admit(ar *admission.AdmissionRequest) *admission.AdmissionResponse {
- return a.strategyAC.Mutate(ar)
+ name := ar.Name
+ namespace := ar.Namespace
+ kind := ar.Kind.Kind
+ klog.Infof("receive mutation request for %s[%s/%s]", kind, namespace, name)
+
+ resp := a.strategyAC.Mutate(ar)
+ if !resp.Allowed {
+ return resp
+ }
+ // see if other ACs are interested in this resource
+ switch ar.Kind.Kind {
+ case "Pod":
+ if "" != ar.Kind.Group {
+ return a.unknownAdmissionRequest(ar)
+ }
+ return a.podAC.MutatePods(ar)
+ default:
+ return resp
+ }
}
// any special initialization goes here
diff --git a/pkg/webhook/pod/pd_deleter.go b/pkg/webhook/pod/pd_deleter.go
index 2159797015..5fed10f568 100644
--- a/pkg/webhook/pod/pd_deleter.go
+++ b/pkg/webhook/pod/pd_deleter.go
@@ -14,6 +14,7 @@
package pod
import (
+ "github.com/pingcap/advanced-statefulset/pkg/apis/apps/v1/helper"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/label"
pdutil "github.com/pingcap/tidb-operator/pkg/manager/member"
@@ -36,7 +37,7 @@ func (pc *PodAdmissionControl) admitDeletePdPods(payload *admitPayload) *admissi
// If the pd pod is deleted by restarter, it is necessary to check former pd restart status
if _, exist := payload.pod.Annotations[label.AnnPodDeferDeleting]; exist {
- existed, err := checkFormerPodRestartStatus(pc.kubeCli, v1alpha1.PDMemberType, payload.tc, namespace, ordinal, *payload.ownerStatefulSet.Spec.Replicas)
+ existed, err := checkFormerPodRestartStatus(pc.kubeCli, v1alpha1.PDMemberType, payload, ordinal)
if err != nil {
return util.ARFail(err)
}
@@ -191,10 +192,11 @@ func (pc *PodAdmissionControl) transferPDLeader(payload *admitPayload) *admissio
return util.ARFail(err)
}
tcName := payload.tc.Name
- lastOrdinal := payload.tc.Status.PD.StatefulSet.Replicas - 1
var targetName string
+
+ lastOrdinal := helper.GetMaxPodOrdinal(*payload.ownerStatefulSet.Spec.Replicas, payload.ownerStatefulSet)
if ordinal == lastOrdinal {
- targetName = pdutil.PdPodName(tcName, 0)
+ targetName = pdutil.PdPodName(tcName, helper.GetMinPodOrdinal(*payload.ownerStatefulSet.Spec.Replicas, payload.ownerStatefulSet))
} else {
targetName = pdutil.PdPodName(tcName, lastOrdinal)
}
diff --git a/pkg/webhook/pod/pod_mutater.go b/pkg/webhook/pod/pod_mutater.go
new file mode 100644
index 0000000000..2c33f785a8
--- /dev/null
+++ b/pkg/webhook/pod/pod_mutater.go
@@ -0,0 +1,129 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pod
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/BurntSushi/toml"
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/features"
+ "github.com/pingcap/tidb-operator/pkg/label"
+ operatorUtils "github.com/pingcap/tidb-operator/pkg/util"
+ "github.com/pingcap/tidb-operator/pkg/webhook/util"
+ admissionv1beta1 "k8s.io/api/admission/v1beta1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/klog"
+)
+
+func (pc *PodAdmissionControl) mutatePod(ar *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse {
+ pod := &corev1.Pod{}
+ if err := json.Unmarshal(ar.Object.Raw, pod); err != nil {
+ return util.ARFail(err)
+ }
+ original := pod.DeepCopy()
+ l := label.Label(pod.Labels)
+ if !l.IsManagedByTiDBOperator() {
+ return util.ARSuccess()
+ }
+ if !l.IsTiKV() {
+ return util.ARSuccess()
+ }
+ tcName, exist := pod.Labels[label.InstanceLabelKey]
+ if !exist {
+ return util.ARSuccess()
+ }
+ namespace := ar.Namespace
+
+ tc, err := pc.operatorCli.PingcapV1alpha1().TidbClusters(namespace).Get(tcName, metav1.GetOptions{})
+ if err != nil {
+ if errors.IsNotFound(err) {
+ return util.ARSuccess()
+ }
+ return util.ARFail(err)
+ }
+
+ if features.DefaultFeatureGate.Enabled(features.AutoScaling) {
+ err := pc.tikvHotRegionSchedule(tc, pod)
+ if err != nil {
+ return util.ARFail(err)
+ }
+ }
+
+ patch, err := util.CreateJsonPatch(original, pod)
+ if err != nil {
+ return util.ARFail(err)
+ }
+ return util.ARPatch(patch)
+}
+
+func (pc *PodAdmissionControl) tikvHotRegionSchedule(tc *v1alpha1.TidbCluster, pod *corev1.Pod) error {
+ podName := pod.Name
+ ordinal, err := operatorUtils.GetOrdinalFromPodName(podName)
+ if err != nil {
+ return err
+ }
+ sets := operatorUtils.GetAutoScalingOutSlots(tc, v1alpha1.TiKVMemberType)
+ if !sets.Has(ordinal) {
+ return nil
+ }
+
+ cm, err := pc.getTikvConfigMap(tc, pod)
+ if err != nil {
+ klog.Infof("tc[%s/%s]'s tikv %s configmap not found, error: %v", tc.Namespace, tc.Name, pod.Name, err)
+ return err
+ }
+ v, ok := cm.Data["config-file"]
+ if !ok {
+ return fmt.Errorf("tc[%s/%s]'s tikv config[config-file] is missing", tc.Namespace, tc.Name)
+ }
+ config := &v1alpha1.TiKVConfig{}
+ err = toml.Unmarshal([]byte(v), config)
+ if err != nil {
+ return err
+ }
+ if config.Server == nil {
+ config.Server = &v1alpha1.TiKVServerConfig{}
+ }
+ if config.Server.Labels == nil {
+ config.Server.Labels = map[string]string{}
+ }
+ // TODO: add documentation to explain the hot region label
+ config.Server.Labels["specialUse"] = "hotRegion"
+ for id, c := range pod.Spec.Containers {
+ if c.Name == "tikv" {
+ appendExtraLabelsENVForTiKV(config.Server.Labels, &c)
+ pod.Spec.Containers[id] = c
+ break
+ }
+ }
+ return nil
+}
+
+// getTikvConfigMap gets the TiKV pod's original ConfigMap from the pod spec volumes
+func (pc *PodAdmissionControl) getTikvConfigMap(tc *v1alpha1.TidbCluster, pod *corev1.Pod) (*corev1.ConfigMap, error) {
+ cnName := ""
+ for _, v := range pod.Spec.Volumes {
+ if (v.Name == "config" || v.Name == "startup-script") && v.ConfigMap != nil {
+ cnName = v.ConfigMap.Name
+ break
+ }
+ }
+ if cnName == "" {
+ return nil, fmt.Errorf("tc[%s/%s] 's tikv configmap can't find", tc.Namespace, tc.Name)
+ }
+ return pc.kubeCli.CoreV1().ConfigMaps(tc.Namespace).Get(cnName, metav1.GetOptions{})
+}
diff --git a/pkg/webhook/pod/pods.go b/pkg/webhook/pod/pods.go
index 15e1e46584..c4ac21050a 100644
--- a/pkg/webhook/pod/pods.go
+++ b/pkg/webhook/pod/pods.go
@@ -19,20 +19,18 @@ import (
"time"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
- apps "k8s.io/api/apps/v1"
-
- core "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/util/sets"
-
"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
+ "github.com/pingcap/tidb-operator/pkg/features"
"github.com/pingcap/tidb-operator/pkg/label"
memberUtils "github.com/pingcap/tidb-operator/pkg/manager/member"
"github.com/pingcap/tidb-operator/pkg/pdapi"
- operatorUtils "github.com/pingcap/tidb-operator/pkg/util"
"github.com/pingcap/tidb-operator/pkg/webhook/util"
admission "k8s.io/api/admission/v1beta1"
+ apps "k8s.io/api/apps/v1"
+ core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
@@ -52,12 +50,19 @@ const (
stsControllerServiceAccounts = "system:serviceaccount:kube-system:statefulset-controller"
)
+var (
+ AstsControllerServiceAccounts string
+)
+
func NewPodAdmissionControl(kubeCli kubernetes.Interface, operatorCli versioned.Interface, PdControl pdapi.PDControlInterface, extraServiceAccounts []string, evictRegionLeaderTimeout time.Duration) *PodAdmissionControl {
serviceAccounts := sets.NewString(stsControllerServiceAccounts)
for _, sa := range extraServiceAccounts {
serviceAccounts.Insert(sa)
}
+ if features.DefaultFeatureGate.Enabled(features.AdvancedStatefulSet) {
+ serviceAccounts.Insert(AstsControllerServiceAccounts)
+ }
EvictLeaderTimeout = evictRegionLeaderTimeout
return &PodAdmissionControl{
kubeCli: kubeCli,
@@ -79,6 +84,13 @@ type admitPayload struct {
pdClient pdapi.PDClient
}
+func (pc *PodAdmissionControl) MutatePods(ar *admission.AdmissionRequest) *admission.AdmissionResponse {
+ if ar.Operation != admission.Create && ar.Operation != admission.Update {
+ return util.ARSuccess()
+ }
+ return pc.mutatePod(ar)
+}
+
func (pc *PodAdmissionControl) AdmitPods(ar *admission.AdmissionRequest) *admission.AdmissionResponse {
name := ar.Name
@@ -165,14 +177,10 @@ func (pc *PodAdmissionControl) admitDeletePods(name, namespace string) *admissio
return util.ARSuccess()
}
- ordinal, err := operatorUtils.GetOrdinalFromPodName(name)
- if err != nil {
- return util.ARFail(err)
- }
-
- // If there was only one replica for this statefulset,admit to delete it.
- if *ownerStatefulSet.Spec.Replicas == 1 && ordinal == 0 {
- klog.Infof("tc[%s/%s]'s pd only have one pod[%s/%s],admit to delete it.", namespace, tcName, namespace, name)
+ // When AdvancedStatefulSet is enabled, the ordinal of the only remaining pod in the statefulset may be non-zero,
+ // so we admit the delete request for the last pod when spec.replicas <= 1 and status.replicas equals 1.
+ if *ownerStatefulSet.Spec.Replicas <= 1 && ownerStatefulSet.Status.Replicas == 1 {
+ klog.Infof("tc[%s/%s]'s statefulset only have one pod[%s/%s],admit to delete it.", namespace, tcName, namespace, name)
return util.ARSuccess()
}
diff --git a/pkg/webhook/pod/tikv_creater.go b/pkg/webhook/pod/tikv_creater.go
index 520cac225a..036e557ca3 100644
--- a/pkg/webhook/pod/tikv_creater.go
+++ b/pkg/webhook/pod/tikv_creater.go
@@ -14,23 +14,29 @@
package pod
import (
+ "encoding/json"
"fmt"
"strings"
- "k8s.io/apimachinery/pkg/util/sets"
-
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/pdapi"
"github.com/pingcap/tidb-operator/pkg/webhook/util"
admission "k8s.io/api/admission/v1beta1"
core "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
)
const (
- tikvNotBootstrapped = `TiKV cluster not bootstrapped, please start TiKV first"`
+ tikvNotBootstrapped = `TiKV cluster not bootstrapped, please start TiKV first"`
+ evictSchedulerLeader = "evict-leader-scheduler"
)
+// Payload is only used to unmarshal the scheduler data returned by the PD API
+type Payload struct {
+ StoreIdRanges map[string]interface{} `json:"store-id-ranges"`
+}
+
func (pc *PodAdmissionControl) admitCreateTiKVPod(pod *core.Pod, tc *v1alpha1.TidbCluster, pdClient pdapi.PDClient) *admission.AdmissionResponse {
name := pod.Name
@@ -61,10 +67,9 @@ func (pc *PodAdmissionControl) admitCreateTiKVPod(pod *core.Pod, tc *v1alpha1.Ti
return util.ARSuccess()
}
- schedulerIds := sets.String{}
- for _, s := range evictLeaderSchedulers {
- id := strings.Split(s, "-")[3]
- schedulerIds.Insert(id)
+ schedulerIds, err := filterLeaderEvictScheduler(evictLeaderSchedulers, pdClient)
+ if err != nil {
+ return util.ARFail(err)
}
// if the pod which is going to be created already have a store and was in evictLeaderSchedulers,
@@ -84,3 +89,34 @@ func (pc *PodAdmissionControl) admitCreateTiKVPod(pod *core.Pod, tc *v1alpha1.Ti
return util.ARSuccess()
}
+
+// filterLeaderEvictScheduler makes the scheduler-ID parsing compatible between the old PD API version and the 4.0 PD API version.
+// For more detail, see: https://github.com/pingcap/tidb-operator/pull/1831
+func filterLeaderEvictScheduler(evictLeaderSchedulers []string, pdClient pdapi.PDClient) (sets.String, error) {
+ schedulerIds := sets.String{}
+ if len(evictLeaderSchedulers) == 1 && evictLeaderSchedulers[0] == evictSchedulerLeader {
+ c, err := pdClient.GetConfig()
+ if err != nil {
+ return schedulerIds, err
+ }
+ if c.Schedule != nil && c.Schedule.SchedulersPayload != nil {
+ v, ok := c.Schedule.SchedulersPayload[evictSchedulerLeader]
+ if ok {
+ payload := &Payload{}
+ err := json.Unmarshal([]byte(v), payload)
+ if err != nil {
+ return schedulerIds, err
+ }
+ for k := range payload.StoreIdRanges {
+ schedulerIds.Insert(k)
+ }
+ }
+ }
+ } else {
+ for _, s := range evictLeaderSchedulers {
+ id := strings.Split(s, "-")[3]
+ schedulerIds.Insert(id)
+ }
+ }
+ return schedulerIds, nil
+}
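// Illustrative sketch (not part of this patch): the two evict-leader scheduler
// formats handled above. Before PD 4.0, each scheduler name embeds the store ID,
// so the ID is taken from the name; since 4.0, PD returns a single
// "evict-leader-scheduler" entry and the store IDs come from the
// "store-id-ranges" payload of its scheduler config.
//
//	old: []string{"evict-leader-scheduler-1", "evict-leader-scheduler-5"}      -> {"1", "5"}
//	new: []string{"evict-leader-scheduler"} + store-id-ranges {"1": .., "5": ..} -> {"1", "5"}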
diff --git a/pkg/webhook/pod/tikv_deleter.go b/pkg/webhook/pod/tikv_deleter.go
index 680dd4a947..fb5fe443bd 100644
--- a/pkg/webhook/pod/tikv_deleter.go
+++ b/pkg/webhook/pod/tikv_deleter.go
@@ -58,7 +58,7 @@ func (pc *PodAdmissionControl) admitDeleteTiKVPods(payload *admitPayload) *admis
// If the tikv pod is deleted by restarter, it is necessary to check former tikv restart status
if _, exist := payload.pod.Annotations[label.AnnPodDeferDeleting]; exist {
- existed, err := checkFormerPodRestartStatus(pc.kubeCli, v1alpha1.TiKVMemberType, payload.tc, namespace, ordinal, *payload.ownerStatefulSet.Spec.Replicas)
+ existed, err := checkFormerPodRestartStatus(pc.kubeCli, v1alpha1.TiKVMemberType, payload, ordinal)
if err != nil {
return util.ARFail(err)
}
@@ -91,7 +91,7 @@ func (pc *PodAdmissionControl) admitDeleteTiKVPods(payload *admitPayload) *admis
}
}
- if storeInfo == nil || storeInfo.Store == nil {
+ if !existed || storeInfo == nil || storeInfo.Store == nil {
klog.Infof("tc[%s/%s]'s tikv pod[%s/%s] can't be found store", namespace, tcName, namespace, name)
return pc.admitDeleteUselessTiKVPod(payload)
}
@@ -100,9 +100,9 @@ func (pc *PodAdmissionControl) admitDeleteTiKVPods(payload *admitPayload) *admis
case v1alpha1.TiKVStateTombstone:
return pc.admitDeleteUselessTiKVPod(payload)
case v1alpha1.TiKVStateOffline:
- return pc.admitDeleteOfflineTiKVPod()
+ return pc.rejectDeleteTiKVPod()
case v1alpha1.TiKVStateDown:
- return pc.admitDeleteUselessTiKVPod(payload)
+ return pc.admitDeleteDownTikvPod(payload)
case v1alpha1.TiKVStateUp:
return pc.admitDeleteUpTiKVPod(payload, storeInfo, storesInfo)
default:
@@ -147,7 +147,7 @@ func (pc *PodAdmissionControl) admitDeleteUselessTiKVPod(payload *admitPayload)
return util.ARSuccess()
}
-func (pc *PodAdmissionControl) admitDeleteOfflineTiKVPod() *admission.AdmissionResponse {
+func (pc *PodAdmissionControl) rejectDeleteTiKVPod() *admission.AdmissionResponse {
return &admission.AdmissionResponse{
Allowed: false,
}
@@ -217,3 +217,19 @@ func (pc *PodAdmissionControl) admitDeleteUpTiKVPodDuringUpgrading(payload *admi
return util.ARSuccess()
}
+
+// When the target TiKV store is Down, we reject the delete request during scale-in; otherwise the newly created
+// TiKV pod could run into a duplicated store ID problem.
+// Users should offline the target TiKV store into Tombstone first and then scale it in.
+// In other cases (e.g. upgrading), we admit the delete request for the down TiKV pod.
+func (pc *PodAdmissionControl) admitDeleteDownTikvPod(payload *admitPayload) *admission.AdmissionResponse {
+
+ isInOrdinal, err := operatorUtils.IsPodOrdinalNotExceedReplicas(payload.pod, payload.ownerStatefulSet)
+ if err != nil {
+ return util.ARFail(err)
+ }
+ if !isInOrdinal {
+ return pc.rejectDeleteTiKVPod()
+ }
+ return util.ARSuccess()
+}
diff --git a/pkg/webhook/pod/tikv_deleter_test.go b/pkg/webhook/pod/tikv_deleter_test.go
index bf3b42a11a..b0e7d0c8d3 100644
--- a/pkg/webhook/pod/tikv_deleter_test.go
+++ b/pkg/webhook/pod/tikv_deleter_test.go
@@ -212,7 +212,7 @@ func TestTiKVDeleterDelete(t *testing.T) {
UpdatePVCErr: false,
PVCNotFound: false,
expectFn: func(g *GomegaWithT, response *admission.AdmissionResponse) {
- g.Expect(response.Allowed).Should(Equal(true))
+ g.Expect(response.Allowed).Should(Equal(false))
},
},
{
@@ -236,7 +236,7 @@ func TestTiKVDeleterDelete(t *testing.T) {
UpdatePVCErr: true,
PVCNotFound: true,
expectFn: func(g *GomegaWithT, response *admission.AdmissionResponse) {
- g.Expect(response.Allowed).Should(Equal(true))
+ g.Expect(response.Allowed).Should(Equal(false))
},
},
{
diff --git a/pkg/webhook/pod/util.go b/pkg/webhook/pod/util.go
index 65b2511b37..16ea0cdaf8 100644
--- a/pkg/webhook/pod/util.go
+++ b/pkg/webhook/pod/util.go
@@ -17,7 +17,9 @@ import (
"fmt"
"time"
+ "github.com/pingcap/advanced-statefulset/pkg/apis/apps/v1/helper"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/features"
"github.com/pingcap/tidb-operator/pkg/label"
memberUtil "github.com/pingcap/tidb-operator/pkg/manager/member"
"github.com/pingcap/tidb-operator/pkg/pdapi"
@@ -136,9 +138,13 @@ func getOwnerStatefulSetForTiDBComponent(pod *core.Pod, kubeCli kubernetes.Inter
// checkFormerPodRestartStatus checks whether there are any former pod is going to be restarted
// return true if existed
-func checkFormerPodRestartStatus(kubeCli kubernetes.Interface, memberType v1alpha1.MemberType, tc *v1alpha1.TidbCluster, namespace string, ordinal int32, replicas int32) (bool, error) {
- for i := replicas - 1; i > ordinal; i-- {
- podName := memberUtil.MemberPodName(tc.Name, i, memberType)
+func checkFormerPodRestartStatus(kubeCli kubernetes.Interface, memberType v1alpha1.MemberType, payload *admitPayload, ordinal int32) (bool, error) {
+ namespace := payload.tc.Namespace
+ tc := payload.tc
+ replicas := *payload.ownerStatefulSet.Spec.Replicas
+
+ f := func(name string, ordinal int32, memberType v1alpha1.MemberType) (bool, error) {
+ podName := memberUtil.MemberPodName(tc.Name, ordinal, memberType)
pod, err := kubeCli.CoreV1().Pods(namespace).Get(podName, meta.GetOptions{})
if err != nil {
return false, err
@@ -146,6 +152,54 @@ func checkFormerPodRestartStatus(kubeCli kubernetes.Interface, memberType v1alph
if _, existed := pod.Annotations[label.AnnPodDeferDeleting]; existed {
return true, nil
}
+ return false, nil
+ }
+
+ if features.DefaultFeatureGate.Enabled(features.AdvancedStatefulSet) {
+ for k := range helper.GetPodOrdinals(replicas, payload.ownerStatefulSet) {
+ if k > ordinal {
+ existed, err := f(tc.Name, k, memberType)
+ if err != nil {
+ return false, err
+ }
+ if existed {
+ return true, nil
+ }
+ }
+ }
+ } else {
+ for i := replicas - 1; i > ordinal; i-- {
+ existed, err := f(tc.Name, i, memberType)
+ if err != nil {
+ return false, err
+ }
+ if existed {
+ return true, nil
+ }
+ }
}
return false, nil
}
+
+func appendExtraLabelsENVForTiKV(labels map[string]string, container *core.Container) {
+ s := ""
+ for k, v := range labels {
+ s = fmt.Sprintf("%s,%s", s, fmt.Sprintf("%s=%s", k, v))
+ }
+ s = s[1:]
+ existed := false
+ for id, env := range container.Env {
+ if env.Name == "STORE_LABELS" {
+ env.Value = fmt.Sprintf("%s,%s", env.Value, s)
+ container.Env[id] = env
+ existed = true
+ break
+ }
+ }
+ if !existed {
+ container.Env = append(container.Env, core.EnvVar{
+ Name: "STORE_LABELS",
+ Value: s,
+ })
+ }
+}
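// Illustrative sketch (not part of this patch): merging the hot-region label into
// the tikv container's STORE_LABELS environment variable. The example function
// name and the initial "zone=z1" value are hypothetical.
func exampleStoreLabelsEnv() core.Container {
	c := core.Container{
		Name: "tikv",
		Env:  []core.EnvVar{{Name: "STORE_LABELS", Value: "zone=z1"}},
	}
	appendExtraLabelsENVForTiKV(map[string]string{"specialUse": "hotRegion"}, &c)
	// c.Env[0].Value is now "zone=z1,specialUse=hotRegion"
	return c
}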
diff --git a/static/tidb-operator-overview.png b/static/tidb-operator-overview.png
index 7a74eb0468..c3c1965e20 100644
Binary files a/static/tidb-operator-overview.png and b/static/tidb-operator-overview.png differ
diff --git a/tests/actions.go b/tests/actions.go
index ce21dc51d2..0a52dc950a 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -69,7 +69,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -124,7 +124,7 @@ func NewOperatorActions(cli versioned.Interface,
framework.ExpectNoError(err)
oa.tidbControl = proxiedtidbclient.NewProxiedTiDBClient(fw, kubeCfg.TLSClientConfig.CAData)
} else {
- oa.tidbControl = controller.NewDefaultTiDBControl()
+ oa.tidbControl = controller.NewDefaultTiDBControl(kubeCli)
}
oa.clusterEvents = make(map[string]*clusterEvent)
for _, c := range clusters {
@@ -138,7 +138,7 @@ func NewOperatorActions(cli versioned.Interface,
}
const (
- DefaultPollTimeout time.Duration = 10 * time.Minute
+ DefaultPollTimeout time.Duration = 20 * time.Minute
DefaultPollInterval time.Duration = 1 * time.Minute
BackupAndRestorePollTimeOut time.Duration = 60 * time.Minute
grafanaUsername = "admin"
@@ -230,16 +230,14 @@ type OperatorActions interface {
LabelNodesOrDie()
CheckDisasterTolerance(info *TidbClusterConfig) error
CheckDisasterToleranceOrDie(info *TidbClusterConfig)
- GetTidbMemberAssignedNodes(info *TidbClusterConfig) (map[string]string, error)
- GetTidbMemberAssignedNodesOrDie(info *TidbClusterConfig) map[string]string
- CheckTidbMemberAssignedNodes(info *TidbClusterConfig, oldAssignedNodes map[string]string) error
- CheckTidbMemberAssignedNodesOrDie(info *TidbClusterConfig, oldAssignedNodes map[string]string)
CheckUpgradeComplete(info *TidbClusterConfig) error
CheckUpgradeCompleteOrDie(info *TidbClusterConfig)
CheckInitSQL(info *TidbClusterConfig) error
CheckInitSQLOrDie(info *TidbClusterConfig)
DeployAndCheckPump(tc *TidbClusterConfig) error
WaitForTidbClusterReady(tc *v1alpha1.TidbCluster, timeout, pollInterval time.Duration) error
+ WaitPodOnNodeReadyOrDie(clusters []*TidbClusterConfig, faultNode string)
+ DataIsTheSameAs(from, to *TidbClusterConfig) (bool, error)
}
type operatorActions struct {
@@ -296,6 +294,8 @@ type OperatorConfig struct {
DefaultingEnabled bool
ValidatingEnabled bool
Cabundle string
+ BackupImage string
+ AutoFailover *bool
}
type TidbClusterConfig struct {
@@ -408,19 +408,25 @@ func (tc *TidbClusterConfig) TidbClusterHelmSetString(m map[string]string) strin
func (oi *OperatorConfig) OperatorHelmSetString(m map[string]string) string {
set := map[string]string{
- "operatorImage": oi.Image,
- "controllerManager.autoFailover": "true",
- "scheduler.kubeSchedulerImageName": oi.SchedulerImage,
- "controllerManager.logLevel": oi.LogLevel,
- "scheduler.logLevel": "4",
- "imagePullPolicy": string(oi.ImagePullPolicy),
- "testMode": strconv.FormatBool(oi.TestMode),
- "admissionWebhook.cabundle": oi.Cabundle,
- "admissionWebhook.create": strconv.FormatBool(oi.WebhookEnabled),
- "admissionWebhook.hooksEnabled.pods": strconv.FormatBool(oi.PodWebhookEnabled),
- "admissionWebhook.hooksEnabled.statefulSets": strconv.FormatBool(oi.StsWebhookEnabled),
- "admissionWebhook.hooksEnabled.defaulting": strconv.FormatBool(oi.DefaultingEnabled),
- "admissionWebhook.hooksEnabled.validating": strconv.FormatBool(oi.ValidatingEnabled),
+ "operatorImage": oi.Image,
+ "tidbBackupManagerImage": oi.BackupImage,
+ "scheduler.logLevel": "4",
+ "testMode": strconv.FormatBool(oi.TestMode),
+ "admissionWebhook.cabundle": oi.Cabundle,
+ "admissionWebhook.create": strconv.FormatBool(oi.WebhookEnabled),
+ "admissionWebhook.validation.pods": strconv.FormatBool(oi.PodWebhookEnabled),
+ "admissionWebhook.validation.statefulSets": strconv.FormatBool(oi.StsWebhookEnabled),
+ "admissionWebhook.mutation.pingcapResources": strconv.FormatBool(oi.DefaultingEnabled),
+ "admissionWebhook.validation.pingcapResources": strconv.FormatBool(oi.ValidatingEnabled),
+ }
+ if oi.LogLevel != "" {
+ set["controllerManager.logLevel"] = oi.LogLevel
+ }
+ if oi.SchedulerImage != "" {
+ set["scheduler.kubeSchedulerImageName"] = oi.SchedulerImage
+ }
+ if string(oi.ImagePullPolicy) != "" {
+ set["imagePullPolicy"] = string(oi.ImagePullPolicy)
}
if oi.ControllerManagerReplicas != nil {
set["controllerManager.replicas"] = strconv.Itoa(*oi.ControllerManagerReplicas)
@@ -437,6 +443,9 @@ func (oi *OperatorConfig) OperatorHelmSetString(m map[string]string) string {
if oi.Enabled(features.AdvancedStatefulSet) {
set["advancedStatefulset.create"] = "true"
}
+ if oi.AutoFailover != nil {
+ set["controllerManager.autoFailover"] = strconv.FormatBool(*oi.AutoFailover)
+ }
arr := make([]string, 0, len(set))
for k, v := range set {
@@ -457,24 +466,34 @@ func (oi *OperatorConfig) Enabled(feature string) bool {
func (oa *operatorActions) runKubectlOrDie(args ...string) string {
cmd := "kubectl"
- glog.Infof("Running '%s %s'", cmd, strings.Join(args, " "))
+ klog.Infof("Running '%s %s'", cmd, strings.Join(args, " "))
out, err := exec.Command(cmd, args...).CombinedOutput()
if err != nil {
- glog.Fatalf("Failed to run '%s %s'\nCombined output: %q\nError: %v", cmd, strings.Join(args, " "), string(out), err)
+ klog.Fatalf("Failed to run '%s %s'\nCombined output: %q\nError: %v", cmd, strings.Join(args, " "), string(out), err)
}
- glog.Infof("Combined output: %q", string(out))
+ klog.Infof("Combined output: %q", string(out))
return string(out)
}
func (oa *operatorActions) CleanCRDOrDie() {
- oa.runKubectlOrDie("delete", "crds", "--all")
+ crdList, err := oa.apiExtCli.ApiextensionsV1beta1().CustomResourceDefinitions().List(metav1.ListOptions{})
+ framework.ExpectNoError(err)
+ for _, crd := range crdList.Items {
+ if !strings.HasSuffix(crd.Name, ".pingcap.com") {
+ framework.Logf("CRD %q ignored", crd.Name)
+ continue
+ }
+ framework.Logf("Deleting CRD %q", crd.Name)
+ err = oa.apiExtCli.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(crd.Name, &metav1.DeleteOptions{})
+ framework.ExpectNoError(err)
+ }
}
// InstallCRDOrDie install CRDs and wait for them to be established in Kubernetes.
func (oa *operatorActions) InstallCRDOrDie(info *OperatorConfig) {
if info.Enabled(features.AdvancedStatefulSet) {
if isSupported, err := utildiscovery.IsAPIGroupVersionSupported(oa.kubeCli.Discovery(), "apiextensions.k8s.io/v1"); err != nil {
- glog.Fatal(err)
+ klog.Fatal(err)
} else if isSupported {
oa.runKubectlOrDie("apply", "-f", oa.manifestPath("e2e/advanced-statefulset-crd.v1.yaml"))
} else {
@@ -483,19 +502,19 @@ func (oa *operatorActions) InstallCRDOrDie(info *OperatorConfig) {
}
oa.runKubectlOrDie("apply", "-f", oa.manifestPath("e2e/crd.yaml"))
oa.runKubectlOrDie("apply", "-f", oa.manifestPath("e2e/data-resource-crd.yaml"))
- glog.Infof("Wait for all CRDs are established")
+ klog.Infof("Wait for all CRDs are established")
e2eutil.WaitForCRDsEstablished(oa.apiExtCli, labels.Everything())
// workaround for https://github.com/kubernetes/kubernetes/issues/65517
- glog.Infof("force sync kubectl cache")
+ klog.Infof("force sync kubectl cache")
cmdArgs := []string{"sh", "-c", "rm -rf ~/.kube/cache ~/.kube/http-cache"}
_, err := exec.Command(cmdArgs[0], cmdArgs[1:]...).CombinedOutput()
if err != nil {
- glog.Fatalf("Failed to run '%s': %v", strings.Join(cmdArgs, " "), err)
+ klog.Fatalf("Failed to run '%s': %v", strings.Join(cmdArgs, " "), err)
}
}
func (oa *operatorActions) DeployOperator(info *OperatorConfig) error {
- glog.Infof("deploying tidb-operator %s", info.ReleaseName)
+ klog.Infof("deploying tidb-operator %s", info.ReleaseName)
if info.Tag != "e2e" {
if err := oa.cloneOperatorRepo(); err != nil {
@@ -518,14 +537,14 @@ func (oa *operatorActions) DeployOperator(info *OperatorConfig) error {
info.ReleaseName,
info.Namespace,
info.OperatorHelmSetString(nil))
- glog.Info(cmd)
+ klog.Info(cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to deploy operator: %v, %s", err, string(res))
}
- glog.Infof("Wait for all apiesrvices are available")
+ klog.Infof("Wait for all apiesrvices are available")
return e2eutil.WaitForAPIServicesAvaiable(oa.aggrCli, labels.Everything())
}
@@ -536,7 +555,7 @@ func (oa *operatorActions) DeployOperatorOrDie(info *OperatorConfig) {
}
func (oa *operatorActions) CleanOperator(info *OperatorConfig) error {
- glog.Infof("cleaning tidb-operator %s", info.ReleaseName)
+ klog.Infof("cleaning tidb-operator %s", info.ReleaseName)
res, err := exec.Command("helm", "del", "--purge", info.ReleaseName).CombinedOutput()
@@ -554,7 +573,7 @@ func (oa *operatorActions) CleanOperatorOrDie(info *OperatorConfig) {
}
func (oa *operatorActions) UpgradeOperator(info *OperatorConfig) error {
- glog.Infof("upgrading tidb-operator %s", info.ReleaseName)
+ klog.Infof("upgrading tidb-operator %s", info.ReleaseName)
listOptions := metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(
@@ -580,7 +599,7 @@ func (oa *operatorActions) UpgradeOperator(info *OperatorConfig) error {
return fmt.Errorf("failed to upgrade operator to: %s, %v, %s", info.Image, err, string(res))
}
- glog.Infof("Wait for all apiesrvices are available")
+ klog.Infof("Wait for all apiesrvices are available")
err = e2eutil.WaitForAPIServicesAvaiable(oa.aggrCli, labels.Everything())
if err != nil {
return err
@@ -594,7 +613,7 @@ func (oa *operatorActions) UpgradeOperator(info *OperatorConfig) error {
waitFn := func() (done bool, err error) {
pods2, err := oa.kubeCli.CoreV1().Pods(metav1.NamespaceAll).List(listOptions)
if err != nil {
- glog.Error(err)
+ klog.Error(err)
return false, nil
}
@@ -625,18 +644,18 @@ func ensurePodsUnchanged(pods1, pods2 *corev1.PodList) error {
return err
}
if reflect.DeepEqual(pods1UIDs, pods2UIDs) {
- glog.V(4).Infof("%s", string(pods1Yaml))
- glog.V(4).Infof("%s", string(pods2Yaml))
- glog.V(4).Infof("%v", pods1UIDs)
- glog.V(4).Infof("%v", pods2UIDs)
- glog.V(4).Infof("pods unchanged after operator upgraded")
+ klog.V(4).Infof("%s", string(pods1Yaml))
+ klog.V(4).Infof("%s", string(pods2Yaml))
+ klog.V(4).Infof("%v", pods1UIDs)
+ klog.V(4).Infof("%v", pods2UIDs)
+ klog.V(4).Infof("pods unchanged after operator upgraded")
return nil
}
- glog.Infof("%s", string(pods1Yaml))
- glog.Infof("%s", string(pods2Yaml))
- glog.Infof("%v", pods1UIDs)
- glog.Infof("%v", pods2UIDs)
+ klog.Infof("%s", string(pods1Yaml))
+ klog.Infof("%s", string(pods2Yaml))
+ klog.Infof("%v", pods1UIDs)
+ klog.Infof("%v", pods2UIDs)
return fmt.Errorf("some pods changed after operator upgraded")
}
@@ -665,7 +684,7 @@ func (oa *operatorActions) DeployTidbCluster(info *TidbClusterConfig) error {
return nil
}
- glog.Infof("deploying tidb cluster [%s/%s]", info.Namespace, info.ClusterName)
+ klog.Infof("deploying tidb cluster [%s/%s]", info.Namespace, info.ClusterName)
oa.EmitEvent(info, "DeployTidbCluster")
namespace := &corev1.Namespace{
@@ -691,7 +710,7 @@ func (oa *operatorActions) DeployTidbCluster(info *TidbClusterConfig) error {
return err
}
cmd = fmt.Sprintf(" %s --values %s", cmd, svFilePath)
- glog.Info(cmd)
+ klog.Info(cmd)
if res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput(); err != nil {
return fmt.Errorf("failed to deploy tidbcluster: %s/%s, %v, %s",
@@ -708,7 +727,7 @@ func (oa *operatorActions) DeployTidbClusterOrDie(info *TidbClusterConfig) {
}
func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
- glog.Infof("cleaning tidbcluster %s/%s", info.Namespace, info.ClusterName)
+ klog.Infof("cleaning tidbcluster %s/%s", info.Namespace, info.ClusterName)
oa.EmitEvent(info, "CleanTidbCluster")
ns := info.Namespace
tcName := info.ClusterName
@@ -738,7 +757,7 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
for _, pvc := range pvcList.Items {
beforePVCNames = append(beforePVCNames, pvc.GetName())
}
- glog.V(4).Info(beforePVCNames)
+ klog.V(4).Info(beforePVCNames)
pvList, err := oa.kubeCli.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
@@ -747,10 +766,10 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
var beforePVNames []string
for _, pv := range pvList.Items {
beforePVNames = append(beforePVNames, pv.GetName())
- glog.V(4).Infof("%s, %s, %v", pv.Name, pv.Spec.PersistentVolumeReclaimPolicy, pv.Labels)
- glog.V(4).Info(pv.Spec.ClaimRef)
+ klog.V(4).Infof("%s, %s, %v", pv.Name, pv.Spec.PersistentVolumeReclaimPolicy, pv.Labels)
+ klog.V(4).Info(pv.Spec.ClaimRef)
}
- glog.V(4).Info(beforePVNames)
+ klog.V(4).Info(beforePVNames)
charts := []string{
info.ClusterName,
@@ -779,7 +798,7 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
for _, pvc := range pvcList.Items {
afterPVCNames = append(afterPVCNames, pvc.GetName())
}
- glog.V(4).Info(afterPVCNames)
+ klog.V(4).Info(afterPVCNames)
if !reflect.DeepEqual(beforePVCNames, afterPVCNames) {
return fmt.Errorf("pvc changed when we delete cluster: %s/%s, before: %v, after: %v",
ns, tcName, beforePVCNames, afterPVCNames)
@@ -794,10 +813,10 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
for _, pv := range pvList.Items {
afterPVNames = append(afterPVNames, pv.GetName())
}
- glog.V(4).Info(afterPVNames)
+ klog.V(4).Info(afterPVNames)
if !reflect.DeepEqual(beforePVNames, afterPVNames) {
- glog.Errorf("pv changed when we delete cluster: %s/%s, before: %v, after: %v",
+ klog.Errorf("pv changed when we delete cluster: %s/%s, before: %v, after: %v",
ns, tcName, beforePVNames, afterPVNames)
return false, nil
}
@@ -859,7 +878,7 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
label.ManagedByLabelKey, "tidb-operator",
label.NamespaceLabelKey, info.Namespace,
label.InstanceLabelKey, info.ClusterName)
- glog.V(4).Info(patchPVCmd)
+ klog.V(4).Info(patchPVCmd)
if res, err := exec.Command("/bin/sh", "-c", patchPVCmd).CombinedOutput(); err != nil {
return fmt.Errorf("failed to patch pv: %v, %s", err, string(res))
}
@@ -867,18 +886,18 @@ func (oa *operatorActions) CleanTidbCluster(info *TidbClusterConfig) error {
pollFn := func() (bool, error) {
if res, err := exec.Command("kubectl", "get", "po", "--output=name", "-n", info.Namespace, "-l", setStr).
CombinedOutput(); err != nil || len(res) != 0 {
- glog.V(4).Infof("waiting for tidbcluster: %s/%s pods deleting, %v, [%s]",
+ klog.V(4).Infof("waiting for tidbcluster: %s/%s pods deleting, %v, [%s]",
info.Namespace, info.ClusterName, err, string(res))
return false, nil
}
pvCmd := fmt.Sprintf("kubectl get pv | grep %s | grep %s 2>/dev/null|grep Released",
info.Namespace, info.ClusterName)
- glog.V(4).Info(pvCmd)
+ klog.V(4).Info(pvCmd)
if res, err := exec.Command("/bin/sh", "-c", pvCmd).CombinedOutput(); len(res) == 0 {
return true, nil
} else if err != nil {
- glog.V(4).Infof("waiting for tidbcluster: %s/%s pv deleting, %v, %s",
+ klog.V(4).Infof("waiting for tidbcluster: %s/%s pv deleting, %v, %s",
info.Namespace, info.ClusterName, err, string(res))
return false, nil
}
@@ -893,56 +912,8 @@ func (oa *operatorActions) CleanTidbClusterOrDie(info *TidbClusterConfig) {
}
}
-func (oa *operatorActions) GetTidbMemberAssignedNodes(info *TidbClusterConfig) (map[string]string, error) {
- assignedNodes := make(map[string]string)
- ns := info.Namespace
- tcName := info.ClusterName
- listOptions := metav1.ListOptions{
- LabelSelector: labels.SelectorFromSet(
- label.New().Instance(tcName).Component(label.TiDBLabelVal).Labels()).String(),
- }
- podList, err := oa.kubeCli.CoreV1().Pods(ns).List(listOptions)
- if err != nil {
- glog.Errorf("failed to get tidb pods: %s/%s, %v", ns, tcName, err)
- return nil, err
- }
- for _, pod := range podList.Items {
- assignedNodes[pod.Name] = pod.Spec.NodeName
- }
- return assignedNodes, nil
-}
-
-func (oa *operatorActions) GetTidbMemberAssignedNodesOrDie(info *TidbClusterConfig) map[string]string {
- result, err := oa.GetTidbMemberAssignedNodes(info)
- if err != nil {
- slack.NotifyAndPanic(err)
- }
- return result
-}
-
-func (oa *operatorActions) CheckTidbMemberAssignedNodes(info *TidbClusterConfig, oldAssignedNodes map[string]string) error {
- glog.Infof("checking tidb member [%s/%s] assigned nodes", info.Namespace, info.ClusterName)
- assignedNodes, err := oa.GetTidbMemberAssignedNodes(info)
- if err != nil {
- return err
- }
- for member, node := range oldAssignedNodes {
- newNode, ok := assignedNodes[member]
- if !ok || newNode != node {
- return fmt.Errorf("tidb member %s is not scheduled to %s, new node: %s", member, node, newNode)
- }
- }
- return nil
-}
-
-func (oa *operatorActions) CheckTidbMemberAssignedNodesOrDie(info *TidbClusterConfig, oldAssignedNodes map[string]string) {
- if err := oa.CheckTidbMemberAssignedNodes(info, oldAssignedNodes); err != nil {
- slack.NotifyAndPanic(err)
- }
-}
-
func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterConfig) error {
- glog.Infof("checking tidb cluster [%s/%s] status", info.Namespace, info.ClusterName)
+ klog.Infof("checking tidb cluster [%s/%s] status", info.Namespace, info.ClusterName)
ns := info.Namespace
tcName := info.ClusterName
@@ -950,7 +921,7 @@ func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterConfig) error
var tc *v1alpha1.TidbCluster
var err error
if tc, err = oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get tidbcluster: %s/%s, %v", ns, tcName, err)
+ klog.Errorf("failed to get tidbcluster: %s/%s, %v", ns, tcName, err)
return false, nil
}
@@ -961,69 +932,69 @@ func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterConfig) error
return false, nil
}
- glog.V(4).Infof("check tidb cluster begin tidbMembersReadyFn")
+ klog.V(4).Infof("check tidb cluster begin tidbMembersReadyFn")
if b, err := oa.tidbMembersReadyFn(tc); !b && err == nil {
return false, nil
}
- glog.V(4).Infof("check tidb cluster begin reclaimPolicySyncFn")
+ klog.V(4).Infof("check tidb cluster begin reclaimPolicySyncFn")
if b, err := oa.reclaimPolicySyncFn(tc); !b && err == nil {
return false, nil
}
- glog.V(4).Infof("check tidb cluster begin metaSyncFn")
+ klog.V(4).Infof("check tidb cluster begin metaSyncFn")
if b, err := oa.metaSyncFn(tc); !b && err == nil {
return false, nil
} else if err != nil {
- glog.Error(err)
+ klog.Error(err)
return false, nil
}
- glog.V(4).Infof("check tidb cluster begin schedulerHAFn")
+ klog.V(4).Infof("check tidb cluster begin schedulerHAFn")
if b, err := oa.schedulerHAFn(tc); !b && err == nil {
return false, nil
}
- glog.V(4).Infof("check all pd and tikv instances have not pod scheduling annotation")
+ klog.V(4).Infof("check all pd and tikv instances have not pod scheduling annotation")
if info.OperatorTag != "v1.0.0" {
if b, err := oa.podsScheduleAnnHaveDeleted(tc); !b && err == nil {
return false, nil
}
}
- glog.V(4).Infof("check store labels")
+ klog.V(4).Infof("check store labels")
if b, err := oa.storeLabelsIsSet(tc, info.TopologyKey); !b && err == nil {
return false, nil
} else if err != nil {
return false, err
}
- glog.V(4).Infof("check tidb cluster begin passwordIsSet")
+ klog.V(4).Infof("check tidb cluster begin passwordIsSet")
if b, err := oa.passwordIsSet(info); !b && err == nil {
return false, nil
}
if info.Monitor {
- glog.V(4).Infof("check tidb monitor normal")
+ klog.V(4).Infof("check tidb monitor normal")
if b, err := oa.monitorNormal(info); !b && err == nil {
return false, nil
}
}
if info.EnableConfigMapRollout {
- glog.V(4).Info("check tidb cluster configuration synced")
+ klog.V(4).Info("check tidb cluster configuration synced")
if b, err := oa.checkTidbClusterConfigUpdated(tc, info); !b && err == nil {
return false, nil
}
}
if info.EnablePVReclaim {
- glog.V(4).Infof("check reclaim pvs success when scale in pd or tikv")
+ klog.V(4).Infof("check reclaim pvs success when scale in pd or tikv")
if b, err := oa.checkReclaimPVSuccess(tc); !b && err == nil {
return false, nil
}
}
return true, nil
}); err != nil {
- glog.Errorf("check tidb cluster status failed: %s", err.Error())
+ klog.Errorf("check tidb cluster status failed: %s", err.Error())
return fmt.Errorf("failed to waiting for tidbcluster %s/%s ready in 120 minutes", ns, tcName)
}
@@ -1037,7 +1008,8 @@ func (oa *operatorActions) CheckTidbClusterStatusOrDie(info *TidbClusterConfig)
}
func (oa *operatorActions) getBlockWriterPod(info *TidbClusterConfig, database string) *corev1.Pod {
- return &corev1.Pod{
+
+ pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: info.Namespace,
Name: blockWriterPodName(info),
@@ -1067,6 +1039,10 @@ func (oa *operatorActions) getBlockWriterPod(info *TidbClusterConfig, database s
RestartPolicy: corev1.RestartPolicyAlways,
},
}
+ if info.OperatorTag != "e2e" {
+ pod.Spec.Containers[0].ImagePullPolicy = corev1.PullAlways
+ }
+ return pod
}
func (oa *operatorActions) BeginInsertDataTo(info *TidbClusterConfig) error {
@@ -1082,6 +1058,7 @@ func (oa *operatorActions) BeginInsertDataTo(info *TidbClusterConfig) error {
if err != nil {
return err
}
+ klog.Infof("begin insert Data in pod[%s/%s]", pod.Namespace, pod.Name)
return nil
}
@@ -1098,16 +1075,20 @@ func (oa *operatorActions) StopInsertDataTo(info *TidbClusterConfig) {
}
oa.EmitEvent(info, "StopInsertData")
- pod := info.blockWriterPod
- err := oa.kubeCli.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
- if err != nil {
- slack.NotifyAndPanic(err)
- }
- err = e2epod.WaitForPodNotFoundInNamespace(oa.kubeCli, pod.Name, pod.Namespace, time.Minute*5)
+ err := wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ pod := info.blockWriterPod
+ err = oa.kubeCli.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
+ if err != nil {
+ if errors.IsNotFound(err) {
+ return true, nil
+ }
+ return false, nil
+ }
+ return true, nil
+ })
if err != nil {
slack.NotifyAndPanic(err)
}
-
info.blockWriterPod = nil
}
@@ -1143,7 +1124,7 @@ func (oa *operatorActions) ScaleTidbCluster(info *TidbClusterConfig) error {
if err != nil {
return err
}
- glog.Info("[SCALE] " + cmd)
+ klog.Info("[SCALE] " + cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return pingcapErrors.Wrapf(err, "failed to scale tidb cluster: %s", string(res))
@@ -1161,32 +1142,32 @@ func (oa *operatorActions) CheckScaleInSafely(info *TidbClusterConfig) error {
return wait.Poll(oa.pollInterval, DefaultPollTimeout, func() (done bool, err error) {
tc, err := oa.cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{})
if err != nil {
- glog.Infof("failed to get tidbcluster when scale in tidbcluster, error: %v", err)
+ klog.Infof("failed to get tidbcluster when scale in tidbcluster, error: %v", err)
return false, nil
}
tikvSetName := controller.TiKVMemberName(info.ClusterName)
tikvSet, err := oa.tcStsGetter.StatefulSets(info.Namespace).Get(tikvSetName, metav1.GetOptions{})
if err != nil {
- glog.Infof("failed to get tikvSet statefulset: [%s], error: %v", tikvSetName, err)
+ klog.Infof("failed to get tikvSet statefulset: [%s], error: %v", tikvSetName, err)
return false, nil
}
pdClient, cancel, err := oa.getPDClient(tc)
if err != nil {
- glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+ klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
return false, nil
}
defer cancel()
stores, err := pdClient.GetStores()
if err != nil {
- glog.Infof("pdClient.GetStores failed,error: %v", err)
+ klog.Infof("pdClient.GetStores failed,error: %v", err)
return false, nil
}
if len(stores.Stores) > int(*tikvSet.Spec.Replicas) {
- glog.Infof("stores.Stores: %v", stores.Stores)
- glog.Infof("tikvSet.Spec.Replicas: %d", *tikvSet.Spec.Replicas)
+ klog.Infof("stores.Stores: %v", stores.Stores)
+ klog.Infof("tikvSet.Spec.Replicas: %d", *tikvSet.Spec.Replicas)
return false, fmt.Errorf("the tikvSet.Spec.Replicas may reduce before tikv complete offline")
}
@@ -1202,7 +1183,7 @@ func (oa *operatorActions) CheckScaledCorrectly(info *TidbClusterConfig, podUIDs
return wait.Poll(oa.pollInterval, DefaultPollTimeout, func() (done bool, err error) {
podUIDs, err := oa.GetPodUIDMap(info)
if err != nil {
- glog.Infof("failed to get pd pods's uid, error: %v", err)
+ klog.Infof("failed to get pd pods's uid, error: %v", err)
return false, nil
}
@@ -1224,7 +1205,7 @@ func (oa *operatorActions) setPartitionAnnotation(namespace, tcName, component s
// add annotation to pause statefulset upgrade process
cmd := fmt.Sprintf("kubectl annotate tc %s -n %s tidb.pingcap.com/%s-partition=%d --overwrite",
tcName, namespace, component, ordinal)
- glog.Infof("%s", cmd)
+ klog.Infof("%s", cmd)
output, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return fmt.Errorf("fail to set annotation for [%s/%s], component: %s, partition: %d, err: %v, output: %s", namespace, tcName, component, ordinal, err, string(output))
@@ -1239,7 +1220,7 @@ func (oa *operatorActions) UpgradeTidbCluster(info *TidbClusterConfig) error {
if err != nil {
return err
}
- glog.Info("[UPGRADE] " + cmd)
+ klog.Info("[UPGRADE] " + cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return pingcapErrors.Wrapf(err, "failed to upgrade tidb cluster: %s", string(res))
@@ -1290,21 +1271,21 @@ func (oa *operatorActions) CheckUpgrade(ctx context.Context, info *TidbClusterCo
scheduler := fmt.Sprintf("evict-leader-scheduler-%s", findStoreFn(tc, podName))
pdClient, cancel, err := oa.getPDClient(tc)
if err != nil {
- glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+ klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
return false, nil
}
defer cancel()
schedulers, err := pdClient.GetEvictLeaderSchedulers()
if err != nil {
- glog.Errorf("failed to get evict leader schedulers, %v", err)
+ klog.Errorf("failed to get evict leader schedulers, %v", err)
return false, nil
}
- glog.V(4).Infof("index:%d,schedulers:%v,error:%v", i, schedulers, err)
+ klog.V(4).Infof("index:%d,schedulers:%v,error:%v", i, schedulers, err)
if len(schedulers) > 1 {
- glog.Errorf("there are too many evict leader schedulers: %v", schedulers)
+ klog.Errorf("there are too many evict leader schedulers: %v", schedulers)
for _, s := range schedulers {
if s == scheduler {
- glog.Infof("found scheudler: %s", scheduler)
+ klog.Infof("found scheudler: %s", scheduler)
return true, nil
}
}
@@ -1314,14 +1295,14 @@ func (oa *operatorActions) CheckUpgrade(ctx context.Context, info *TidbClusterCo
return false, nil
}
if schedulers[0] == scheduler {
- glog.Infof("index: %d,the schedulers: %s = %s", i, schedulers[0], scheduler)
+ klog.Infof("index: %d,the schedulers: %s = %s", i, schedulers[0], scheduler)
return true, nil
}
- glog.Errorf("index: %d,the scheduler: %s != %s", i, schedulers[0], scheduler)
+ klog.Errorf("index: %d,the scheduler: %s != %s", i, schedulers[0], scheduler)
return false, nil
})
if err != nil {
- glog.Errorf("failed to check upgrade %s/%s, %v", ns, tcName, err)
+ klog.Errorf("failed to check upgrade %s/%s, %v", ns, tcName, err)
return err
}
}
@@ -1345,19 +1326,19 @@ func (oa *operatorActions) CheckUpgrade(ctx context.Context, info *TidbClusterCo
return wait.PollImmediate(1*time.Second, 6*time.Minute, func() (done bool, err error) {
pdClient, cancel, err := oa.getPDClient(tc)
if err != nil {
- glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+ klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
return false, nil
}
defer cancel()
schedulers, err := pdClient.GetEvictLeaderSchedulers()
if err != nil {
- glog.Errorf("failed to get evict leader schedulers, %v", err)
+ klog.Errorf("failed to get evict leader schedulers, %v", err)
return false, nil
}
if len(schedulers) == 0 {
return true, nil
}
- glog.Errorf("schedulers: %v is not empty", schedulers)
+ klog.Errorf("schedulers: %v is not empty", schedulers)
return false, nil
})
}
@@ -1375,7 +1356,7 @@ func (oa *operatorActions) CleanMonitor(info *TidbClusterConfig) error { return
func getMemberContainer(kubeCli kubernetes.Interface, stsGetter typedappsv1.StatefulSetsGetter, namespace, tcName, component string) (*corev1.Container, bool) {
sts, err := stsGetter.StatefulSets(namespace).Get(fmt.Sprintf("%s-%s", tcName, component), metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get sts for component %s of cluster %s/%s", component, namespace, tcName)
+ klog.Errorf("failed to get sts for component %s of cluster %s/%s", component, namespace, tcName)
return nil, false
}
listOption := metav1.ListOptions{
@@ -1383,16 +1364,16 @@ func getMemberContainer(kubeCli kubernetes.Interface, stsGetter typedappsv1.Stat
}
podList, err := kubeCli.CoreV1().Pods(namespace).List(listOption)
if err != nil {
- glog.Errorf("fail to get pods for component %s of cluster %s/%s", component, namespace, tcName)
+ klog.Errorf("fail to get pods for component %s of cluster %s/%s", component, namespace, tcName)
return nil, false
}
if len(podList.Items) == 0 {
- glog.Errorf("no pods found for component %s of cluster %s/%s", component, namespace, tcName)
+ klog.Errorf("no pods found for component %s of cluster %s/%s", component, namespace, tcName)
return nil, false
}
pod := podList.Items[0]
if len(pod.Spec.Containers) == 0 {
- glog.Errorf("no containers found for component %s of cluster %s/%s", component, namespace, tcName)
+ klog.Errorf("no containers found for component %s of cluster %s/%s", component, namespace, tcName)
return nil, false
}
@@ -1413,7 +1394,7 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, err
pdSet, err := oa.tcStsGetter.StatefulSets(ns).Get(pdSetName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get statefulset: %s/%s, %v", ns, pdSetName, err)
+ klog.Errorf("failed to get statefulset: %s/%s, %v", ns, pdSetName, err)
return false, nil
}
@@ -1422,48 +1403,48 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, err
}
if tc.Status.PD.StatefulSet == nil {
- glog.Infof("tidbcluster: %s/%s .status.PD.StatefulSet is nil", ns, tcName)
+ klog.Infof("tidbcluster: %s/%s .status.PD.StatefulSet is nil", ns, tcName)
return false, nil
}
failureCount := len(tc.Status.PD.FailureMembers)
replicas := tc.Spec.PD.Replicas + int32(failureCount)
if *pdSet.Spec.Replicas != replicas {
- glog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
+ klog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
ns, pdSetName, *pdSet.Spec.Replicas, replicas)
return false, nil
}
if pdSet.Status.ReadyReplicas != tc.Spec.PD.Replicas {
- glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
+ klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
ns, pdSetName, pdSet.Status.ReadyReplicas, tc.Spec.PD.Replicas)
return false, nil
}
if len(tc.Status.PD.Members) != int(tc.Spec.PD.Replicas) {
- glog.Infof("tidbcluster: %s/%s .status.PD.Members count(%d) != %d",
+ klog.Infof("tidbcluster: %s/%s .status.PD.Members count(%d) != %d",
ns, tcName, len(tc.Status.PD.Members), tc.Spec.PD.Replicas)
return false, nil
}
if pdSet.Status.ReadyReplicas != pdSet.Status.Replicas {
- glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
+ klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
ns, pdSetName, pdSet.Status.ReadyReplicas, pdSet.Status.Replicas)
return false, nil
}
c, found := getMemberContainer(oa.kubeCli, oa.tcStsGetter, ns, tc.Name, label.PDLabelVal)
if !found {
- glog.Infof("statefulset: %s/%s not found containers[name=pd] or pod %s-0",
+ klog.Infof("statefulset: %s/%s not found containers[name=pd] or pod %s-0",
ns, pdSetName, pdSetName)
return false, nil
}
if tc.PDImage() != c.Image {
- glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=pd].image(%s) != %s",
+ klog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=pd].image(%s) != %s",
ns, pdSetName, c.Image, tc.PDImage())
return false, nil
}
for _, member := range tc.Status.PD.Members {
if !member.Health {
- glog.Infof("tidbcluster: %s/%s pd member(%s/%s) is not health",
+ klog.Infof("tidbcluster: %s/%s pd member(%s/%s) is not health",
ns, tcName, member.ID, member.Name)
return false, nil
}
@@ -1472,11 +1453,11 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, err
pdServiceName := controller.PDMemberName(tcName)
pdPeerServiceName := controller.PDPeerMemberName(tcName)
if _, err := oa.kubeCli.CoreV1().Services(ns).Get(pdServiceName, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get service: %s/%s", ns, pdServiceName)
+ klog.Errorf("failed to get service: %s/%s", ns, pdServiceName)
return false, nil
}
if _, err := oa.kubeCli.CoreV1().Services(ns).Get(pdPeerServiceName, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get peer service: %s/%s", ns, pdPeerServiceName)
+ klog.Errorf("failed to get peer service: %s/%s", ns, pdPeerServiceName)
return false, nil
}
@@ -1490,7 +1471,7 @@ func (oa *operatorActions) tikvMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
tikvSet, err := oa.tcStsGetter.StatefulSets(ns).Get(tikvSetName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get statefulset: %s/%s, %v", ns, tikvSetName, err)
+ klog.Errorf("failed to get statefulset: %s/%s, %v", ns, tikvSetName, err)
return false, nil
}
@@ -1499,55 +1480,55 @@ func (oa *operatorActions) tikvMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
}
if tc.Status.TiKV.StatefulSet == nil {
- glog.Infof("tidbcluster: %s/%s .status.TiKV.StatefulSet is nil", ns, tcName)
+ klog.Infof("tidbcluster: %s/%s .status.TiKV.StatefulSet is nil", ns, tcName)
return false, nil
}
failureCount := len(tc.Status.TiKV.FailureStores)
replicas := tc.Spec.TiKV.Replicas + int32(failureCount)
if *tikvSet.Spec.Replicas != replicas {
- glog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
+ klog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
ns, tikvSetName, *tikvSet.Spec.Replicas, replicas)
return false, nil
}
if tikvSet.Status.ReadyReplicas != replicas {
- glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
+ klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
ns, tikvSetName, tikvSet.Status.ReadyReplicas, replicas)
return false, nil
}
if len(tc.Status.TiKV.Stores) != int(replicas) {
- glog.Infof("tidbcluster: %s/%s .status.TiKV.Stores.count(%d) != %d",
+ klog.Infof("tidbcluster: %s/%s .status.TiKV.Stores.count(%d) != %d",
ns, tcName, len(tc.Status.TiKV.Stores), replicas)
return false, nil
}
if tikvSet.Status.ReadyReplicas != tikvSet.Status.Replicas {
- glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
+ klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
ns, tikvSetName, tikvSet.Status.ReadyReplicas, tikvSet.Status.Replicas)
return false, nil
}
c, found := getMemberContainer(oa.kubeCli, oa.tcStsGetter, ns, tc.Name, label.TiKVLabelVal)
if !found {
- glog.Infof("statefulset: %s/%s not found containers[name=tikv] or pod %s-0",
+ klog.Infof("statefulset: %s/%s not found containers[name=tikv] or pod %s-0",
ns, tikvSetName, tikvSetName)
return false, nil
}
if tc.TiKVImage() != c.Image {
- glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tikv].image(%s) != %s",
+ klog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tikv].image(%s) != %s",
ns, tikvSetName, c.Image, tc.TiKVImage())
return false, nil
}
for _, store := range tc.Status.TiKV.Stores {
if store.State != v1alpha1.TiKVStateUp {
- glog.Infof("tidbcluster: %s/%s's store(%s) state != %s", ns, tcName, store.ID, v1alpha1.TiKVStateUp)
+ klog.Infof("tidbcluster: %s/%s's store(%s) state != %s", ns, tcName, store.ID, v1alpha1.TiKVStateUp)
return false, nil
}
}
tikvPeerServiceName := controller.TiKVPeerMemberName(tcName)
if _, err := oa.kubeCli.CoreV1().Services(ns).Get(tikvPeerServiceName, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get peer service: %s/%s", ns, tikvPeerServiceName)
+ klog.Errorf("failed to get peer service: %s/%s", ns, tikvPeerServiceName)
return false, nil
}
@@ -1561,7 +1542,7 @@ func (oa *operatorActions) tidbMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
tidbSet, err := oa.tcStsGetter.StatefulSets(ns).Get(tidbSetName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get statefulset: %s/%s, %v", ns, tidbSetName, err)
+ klog.Errorf("failed to get statefulset: %s/%s, %v", ns, tidbSetName, err)
return false, nil
}
@@ -1570,53 +1551,53 @@ func (oa *operatorActions) tidbMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
}
if tc.Status.TiDB.StatefulSet == nil {
- glog.Infof("tidbcluster: %s/%s .status.TiDB.StatefulSet is nil", ns, tcName)
+ klog.Infof("tidbcluster: %s/%s .status.TiDB.StatefulSet is nil", ns, tcName)
return false, nil
}
failureCount := len(tc.Status.TiDB.FailureMembers)
replicas := tc.Spec.TiDB.Replicas + int32(failureCount)
if *tidbSet.Spec.Replicas != replicas {
- glog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
+ klog.Infof("statefulset: %s/%s .spec.Replicas(%d) != %d",
ns, tidbSetName, *tidbSet.Spec.Replicas, replicas)
return false, nil
}
if tidbSet.Status.ReadyReplicas != tc.Spec.TiDB.Replicas {
- glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
+ klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != %d",
ns, tidbSetName, tidbSet.Status.ReadyReplicas, tc.Spec.TiDB.Replicas)
return false, nil
}
if len(tc.Status.TiDB.Members) != int(tc.Spec.TiDB.Replicas) {
- glog.Infof("tidbcluster: %s/%s .status.TiDB.Members count(%d) != %d",
+ klog.Infof("tidbcluster: %s/%s .status.TiDB.Members count(%d) != %d",
ns, tcName, len(tc.Status.TiDB.Members), tc.Spec.TiDB.Replicas)
return false, nil
}
if tidbSet.Status.ReadyReplicas != tidbSet.Status.Replicas {
- glog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
+ klog.Infof("statefulset: %s/%s .status.ReadyReplicas(%d) != .status.Replicas(%d)",
ns, tidbSetName, tidbSet.Status.ReadyReplicas, tidbSet.Status.Replicas)
return false, nil
}
c, found := getMemberContainer(oa.kubeCli, oa.tcStsGetter, ns, tc.Name, label.TiDBLabelVal)
if !found {
- glog.Infof("statefulset: %s/%s not found containers[name=tidb] or pod %s-0",
+ klog.Infof("statefulset: %s/%s not found containers[name=tidb] or pod %s-0",
ns, tidbSetName, tidbSetName)
return false, nil
}
if tc.TiDBImage() != c.Image {
- glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tidb].image(%s) != %s",
+ klog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tidb].image(%s) != %s",
ns, tidbSetName, c.Image, tc.TiDBImage())
return false, nil
}
_, err = oa.kubeCli.CoreV1().Services(ns).Get(tidbSetName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get service: %s/%s", ns, tidbSetName)
+ klog.Errorf("failed to get service: %s/%s", ns, tidbSetName)
return false, nil
}
_, err = oa.kubeCli.CoreV1().Services(ns).Get(controller.TiDBPeerMemberName(tcName), metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get peer service: %s/%s", ns, controller.TiDBPeerMemberName(tcName))
+ klog.Errorf("failed to get peer service: %s/%s", ns, controller.TiDBPeerMemberName(tcName))
return false, nil
}
@@ -1634,17 +1615,17 @@ func (oa *operatorActions) reclaimPolicySyncFn(tc *v1alpha1.TidbCluster) (bool,
var pvcList *corev1.PersistentVolumeClaimList
var err error
if pvcList, err = oa.kubeCli.CoreV1().PersistentVolumeClaims(ns).List(listOptions); err != nil {
- glog.Errorf("failed to list pvs for tidbcluster %s/%s, %v", ns, tcName, err)
+ klog.Errorf("failed to list pvs for tidbcluster %s/%s, %v", ns, tcName, err)
return false, nil
}
for _, pvc := range pvcList.Items {
pvName := pvc.Spec.VolumeName
if pv, err := oa.kubeCli.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get pv: %s, error: %v", pvName, err)
+ klog.Errorf("failed to get pv: %s, error: %v", pvName, err)
return false, nil
} else if pv.Spec.PersistentVolumeReclaimPolicy != tc.Spec.PVReclaimPolicy {
- glog.Errorf("pv: %s's reclaimPolicy is not Retain", pvName)
+ klog.Errorf("pv: %s's reclaimPolicy is not Retain", pvName)
return false, nil
}
}
@@ -1658,13 +1639,13 @@ func (oa *operatorActions) metaSyncFn(tc *v1alpha1.TidbCluster) (bool, error) {
pdClient, cancel, err := oa.getPDClient(tc)
if err != nil {
- glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+ klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
return false, nil
}
defer cancel()
var cluster *metapb.Cluster
if cluster, err = pdClient.GetCluster(); err != nil {
- glog.Errorf("failed to get cluster from pdControl: %s/%s, error: %v", ns, tcName, err)
+ klog.Errorf("failed to get cluster from pdControl: %s/%s, error: %v", ns, tcName, err)
return false, nil
}
@@ -1677,7 +1658,7 @@ func (oa *operatorActions) metaSyncFn(tc *v1alpha1.TidbCluster) (bool, error) {
var podList *corev1.PodList
if podList, err = oa.kubeCli.CoreV1().Pods(ns).List(listOptions); err != nil {
- glog.Errorf("failed to list pods for tidbcluster %s/%s, %v", ns, tcName, err)
+ klog.Errorf("failed to list pods for tidbcluster %s/%s, %v", ns, tcName, err)
return false, nil
}
@@ -1685,7 +1666,7 @@ outerLoop:
for _, pod := range podList.Items {
podName := pod.GetName()
if pod.Labels[label.ClusterIDLabelKey] != clusterID {
- glog.Infof("tidbcluster %s/%s's pod %s's label %s not equals %s ",
+ klog.Infof("tidbcluster %s/%s's pod %s's label %s not equals %s ",
ns, tcName, podName, label.ClusterIDLabelKey, clusterID)
return false, nil
}
@@ -1696,7 +1677,7 @@ outerLoop:
var memberID string
members, err := pdClient.GetMembers()
if err != nil {
- glog.Errorf("failed to get members for tidbcluster %s/%s, %v", ns, tcName, err)
+ klog.Errorf("failed to get members for tidbcluster %s/%s, %v", ns, tcName, err)
return false, nil
}
for _, member := range members.Members {
@@ -1706,7 +1687,7 @@ outerLoop:
}
}
if memberID == "" {
- glog.Errorf("tidbcluster: %s/%s's pod %s label [%s] is empty",
+ klog.Errorf("tidbcluster: %s/%s's pod %s label [%s] is empty",
ns, tcName, podName, label.MemberIDLabelKey)
return false, nil
}
@@ -1718,7 +1699,7 @@ outerLoop:
var storeID string
stores, err := pdClient.GetStores()
if err != nil {
- glog.Errorf("failed to get stores for tidbcluster %s/%s, %v", ns, tcName, err)
+ klog.Errorf("failed to get stores for tidbcluster %s/%s, %v", ns, tcName, err)
return false, nil
}
for _, store := range stores.Stores {
@@ -1729,7 +1710,7 @@ outerLoop:
}
}
if storeID == "" {
- glog.Errorf("tidbcluster: %s/%s's pod %s label [%s] is empty",
+ klog.Errorf("tidbcluster: %s/%s's pod %s label [%s] is empty",
tc.GetNamespace(), tc.GetName(), podName, label.StoreIDLabelKey)
return false, nil
}
@@ -1756,7 +1737,7 @@ outerLoop:
var pvc *corev1.PersistentVolumeClaim
if pvc, err = oa.kubeCli.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get pvc %s/%s for pod %s/%s", ns, pvcName, ns, podName)
+ klog.Errorf("failed to get pvc %s/%s for pod %s/%s", ns, pvcName, ns, podName)
return false, nil
}
if pvc.Labels[label.ClusterIDLabelKey] != clusterID {
@@ -1783,7 +1764,7 @@ outerLoop:
pvName := pvc.Spec.VolumeName
var pv *corev1.PersistentVolume
if pv, err = oa.kubeCli.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get pv for pvc %s/%s, %v", ns, pvcName, err)
+ klog.Errorf("failed to get pv for pvc %s/%s, %v", ns, pvcName, err)
return false, nil
}
if pv.Labels[label.NamespaceLabelKey] != ns {
@@ -1852,7 +1833,7 @@ func (oa *operatorActions) schedulerHAFn(tc *v1alpha1.TidbCluster) (bool, error)
var podList *corev1.PodList
var err error
if podList, err = oa.kubeCli.CoreV1().Pods(ns).List(listOptions); err != nil {
- glog.Errorf("failed to list pods for tidbcluster %s/%s, %v", ns, tcName, err)
+ klog.Errorf("failed to list pods for tidbcluster %s/%s, %v", ns, tcName, err)
return false, nil
}
@@ -1894,7 +1875,7 @@ func (oa *operatorActions) podsScheduleAnnHaveDeleted(tc *v1alpha1.TidbCluster)
pvcList, err := oa.kubeCli.CoreV1().PersistentVolumeClaims(ns).List(listOptions)
if err != nil {
- glog.Errorf("failed to list pvcs for tidb cluster %s/%s, err: %v", ns, tcName, err)
+ klog.Errorf("failed to list pvcs for tidb cluster %s/%s, err: %v", ns, tcName, err)
return false, nil
}
@@ -1906,7 +1887,7 @@ func (oa *operatorActions) podsScheduleAnnHaveDeleted(tc *v1alpha1.TidbCluster)
}
if _, exist := pvc.Annotations[label.AnnPVCPodScheduling]; exist {
- glog.Errorf("tidb cluster %s/%s pvc %s has pod scheduling annotation", ns, tcName, pvcName)
+ klog.Errorf("tidb cluster %s/%s pvc %s has pod scheduling annotation", ns, tcName, pvcName)
return false, nil
}
}
@@ -1917,13 +1898,13 @@ func (oa *operatorActions) podsScheduleAnnHaveDeleted(tc *v1alpha1.TidbCluster)
func (oa *operatorActions) checkReclaimPVSuccess(tc *v1alpha1.TidbCluster) (bool, error) {
// check pv reclaim for pd
if err := oa.checkComponentReclaimPVSuccess(tc, label.PDLabelVal); err != nil {
- glog.Errorf(err.Error())
+ klog.Errorf(err.Error())
return false, nil
}
// check pv reclaim for tikv
if err := oa.checkComponentReclaimPVSuccess(tc, label.TiKVLabelVal); err != nil {
- glog.Errorf(err.Error())
+ klog.Errorf(err.Error())
return false, nil
}
return true, nil
@@ -2001,7 +1982,7 @@ func (oa *operatorActions) getComponentPVList(tc *v1alpha1.TidbCluster, componen
func (oa *operatorActions) storeLabelsIsSet(tc *v1alpha1.TidbCluster, topologyKey string) (bool, error) {
pdClient, cancel, err := oa.getPDClient(tc)
if err != nil {
- glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+ klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
return false, nil
}
defer cancel()
@@ -2034,28 +2015,28 @@ func (oa *operatorActions) passwordIsSet(clusterInfo *TidbClusterConfig) (bool,
var job *batchv1.Job
var err error
if job, err = oa.kubeCli.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get job %s/%s, %v", ns, jobName, err)
+ klog.Errorf("failed to get job %s/%s, %v", ns, jobName, err)
return false, nil
}
if job.Status.Succeeded < 1 {
- glog.Errorf("tidbcluster: %s/%s password setter job not finished", ns, tcName)
+ klog.Errorf("tidbcluster: %s/%s password setter job not finished", ns, tcName)
return false, nil
}
var db *sql.DB
dsn, cancel, err := oa.getTiDBDSN(ns, tcName, "test", clusterInfo.Password)
if err != nil {
- glog.Errorf("failed to get TiDB DSN: %v", err)
+ klog.Errorf("failed to get TiDB DSN: %v", err)
return false, nil
}
defer cancel()
if db, err = sql.Open("mysql", dsn); err != nil {
- glog.Errorf("can't open connection to mysql: %s, %v", dsn, err)
+ klog.Errorf("can't open connection to mysql: %s, %v", dsn, err)
return false, nil
}
defer db.Close()
if err := db.Ping(); err != nil {
- glog.Errorf("can't connect to mysql: %s with password %s, %v", dsn, clusterInfo.Password, err)
+ klog.Errorf("can't connect to mysql: %s with password %s, %v", dsn, clusterInfo.Password, err)
return false, nil
}
@@ -2068,20 +2049,20 @@ func (oa *operatorActions) monitorNormal(clusterInfo *TidbClusterConfig) (bool,
monitorDeploymentName := fmt.Sprintf("%s-monitor", tcName)
monitorDeployment, err := oa.kubeCli.AppsV1().Deployments(ns).Get(monitorDeploymentName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("get monitor deployment: [%s/%s] failed", ns, monitorDeploymentName)
+ klog.Errorf("get monitor deployment: [%s/%s] failed", ns, monitorDeploymentName)
return false, nil
}
if monitorDeployment.Status.ReadyReplicas < 1 {
- glog.Infof("monitor ready replicas %d < 1", monitorDeployment.Status.ReadyReplicas)
+ klog.Infof("monitor ready replicas %d < 1", monitorDeployment.Status.ReadyReplicas)
return false, nil
}
if err := oa.checkPrometheus(clusterInfo); err != nil {
- glog.Infof("check [%s/%s]'s prometheus data failed: %v", ns, monitorDeploymentName, err)
+ klog.Infof("check [%s/%s]'s prometheus data failed: %v", ns, monitorDeploymentName, err)
return false, nil
}
if err := oa.checkGrafanaData(clusterInfo); err != nil {
- glog.Infof("check [%s/%s]'s grafana data failed: %v", ns, monitorDeploymentName, err)
+ klog.Infof("check [%s/%s]'s grafana data failed: %v", ns, monitorDeploymentName, err)
return false, nil
}
return true, nil
@@ -2103,17 +2084,17 @@ func (oa *operatorActions) checkTidbClusterConfigUpdated(tc *v1alpha1.TidbCluste
func (oa *operatorActions) checkPdConfigUpdated(tc *v1alpha1.TidbCluster, clusterInfo *TidbClusterConfig) bool {
pdClient, cancel, err := oa.getPDClient(tc)
if err != nil {
- glog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
+ klog.Errorf("Failed to create external PD client for tidb cluster %q: %v", tc.GetName(), err)
return false
}
defer cancel()
config, err := pdClient.GetConfig()
if err != nil {
- glog.Errorf("failed to get PD configuraion from tidb cluster [%s/%s]", tc.Namespace, tc.Name)
+ klog.Errorf("failed to get PD configuraion from tidb cluster [%s/%s]", tc.Namespace, tc.Name)
return false
}
if len(clusterInfo.PDLogLevel) > 0 && clusterInfo.PDLogLevel != config.Log.Level {
- glog.Errorf("check [%s/%s] PD logLevel configuration updated failed: desired [%s], actual [%s] not equal",
+ klog.Errorf("check [%s/%s] PD logLevel configuration updated failed: desired [%s], actual [%s] not equal",
tc.Namespace,
tc.Name,
clusterInfo.PDLogLevel,
@@ -2122,7 +2103,7 @@ func (oa *operatorActions) checkPdConfigUpdated(tc *v1alpha1.TidbCluster, cluste
}
// TODO: fix #487 PD configuration update for persisted configurations
//if clusterInfo.PDMaxReplicas > 0 && config.Replication.MaxReplicas != uint64(clusterInfo.PDMaxReplicas) {
- // glog.Errorf("check [%s/%s] PD maxReplicas configuration updated failed: desired [%d], actual [%d] not equal",
+ // klog.Errorf("check [%s/%s] PD maxReplicas configuration updated failed: desired [%d], actual [%d] not equal",
// tc.Namespace,
// tc.Name,
// clusterInfo.PDMaxReplicas,
@@ -2135,17 +2116,17 @@ func (oa *operatorActions) checkPdConfigUpdated(tc *v1alpha1.TidbCluster, cluste
func (oa *operatorActions) checkTiDBConfigUpdated(tc *v1alpha1.TidbCluster, clusterInfo *TidbClusterConfig) bool {
ordinals, err := util.GetPodOrdinals(tc, v1alpha1.TiDBMemberType)
if err != nil {
- glog.Errorf("failed to get pod ordinals for tidb cluster %s/%s (member: %v)", tc.Namespace, tc.Name, v1alpha1.TiDBMemberType)
+ klog.Errorf("failed to get pod ordinals for tidb cluster %s/%s (member: %v)", tc.Namespace, tc.Name, v1alpha1.TiDBMemberType)
return false
}
for i := range ordinals {
config, err := oa.tidbControl.GetSettings(tc, int32(i))
if err != nil {
- glog.Errorf("failed to get TiDB configuration from cluster [%s/%s], ordinal: %d, error: %v", tc.Namespace, tc.Name, i, err)
+ klog.Errorf("failed to get TiDB configuration from cluster [%s/%s], ordinal: %d, error: %v", tc.Namespace, tc.Name, i, err)
return false
}
if clusterInfo.TiDBTokenLimit > 0 && uint(clusterInfo.TiDBTokenLimit) != config.TokenLimit {
- glog.Errorf("check [%s/%s] TiDB instance [%d] configuration updated failed: desired [%d], actual [%d] not equal",
+ klog.Errorf("check [%s/%s] TiDB instance [%d] configuration updated failed: desired [%d], actual [%d] not equal",
tc.Namespace, tc.Name, i, clusterInfo.TiDBTokenLimit, config.TokenLimit)
return false
}
@@ -2193,7 +2174,7 @@ func getDatasourceID(addr string) (int, error) {
defer func() {
err := resp.Body.Close()
if err != nil {
- glog.Warningf("close response failed, err: %v", err)
+ klog.Warningf("close response failed, err: %v", err)
}
}()
@@ -2234,7 +2215,7 @@ func notFound(res string) bool {
func (oa *operatorActions) cloneOperatorRepo() error {
cmd := fmt.Sprintf("git clone %s %s", oa.cfg.OperatorRepoUrl, oa.cfg.OperatorRepoDir)
- glog.Info(cmd)
+ klog.Info(cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil && !strings.Contains(string(res), "already exists") {
return fmt.Errorf("failed to clone tidb-operator repository: %v, %s", err, string(res))
@@ -2255,7 +2236,7 @@ func (oa *operatorActions) checkoutTag(tagName string) error {
if tagName != "v1.0.0" {
cmd = cmd + fmt.Sprintf(" && cp -rf charts/tidb-drainer %s", oa.drainerChartPath(tagName))
}
- glog.Info(cmd)
+ klog.Info(cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to check tag: %s, %v, %s", tagName, err, string(res))
@@ -2266,7 +2247,7 @@ func (oa *operatorActions) checkoutTag(tagName string) error {
func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error {
oa.EmitEvent(info, "DeployAdHocBackup")
- glog.Infof("begin to deploy adhoc backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace)
+ klog.Infof("begin to deploy adhoc backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace)
var tsStr string
getTSFn := func() (bool, error) {
@@ -2275,7 +2256,7 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error {
if oa.fw != nil {
localHost, localPort, cancel, err := portforward.ForwardOnePort(oa.fw, info.Namespace, fmt.Sprintf("svc/%s-tidb", info.ClusterName), 4000)
if err != nil {
- glog.Errorf("failed to forward port %d for %s/%s", 4000, info.Namespace, info.ClusterName)
+ klog.Errorf("failed to forward port %d for %s/%s", 4000, info.Namespace, info.ClusterName)
return false, nil
}
defer cancel()
@@ -2295,11 +2276,11 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error {
mysqlHost,
mysqlPort,
)
- glog.Info(getTSCmd)
+ klog.Info(getTSCmd)
- res, err := exec.Command("/bin/sh", "-c", getTSCmd).CombinedOutput()
+ res, err := exec.Command("/bin/bash", "-c", getTSCmd).CombinedOutput()
if err != nil {
- glog.Errorf("failed to get ts %v, %s", err, string(res))
+ klog.Errorf("failed to get ts %v, %s", err, string(res))
return false, nil
}
tsStr = string(res)
@@ -2325,7 +2306,7 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error {
fullbackupName := fmt.Sprintf("%s-backup", info.ClusterName)
cmd := fmt.Sprintf("helm install -n %s --namespace %s %s --set-string %s",
fullbackupName, info.Namespace, oa.backupChartPath(info.OperatorTag), setString)
- glog.Infof("install adhoc deployment [%s]", cmd)
+ klog.Infof("install adhoc deployment [%s]", cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to launch adhoc backup job: %v, %s", err, string(res))
@@ -2335,7 +2316,7 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error {
}
func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, error) {
- glog.Infof("checking adhoc backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace)
+ klog.Infof("checking adhoc backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace)
ns := info.Namespace
var ts string
@@ -2343,11 +2324,11 @@ func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, er
fn := func() (bool, error) {
job, err := oa.kubeCli.BatchV1().Jobs(info.Namespace).Get(jobName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get jobs %s ,%v", jobName, err)
+ klog.Errorf("failed to get jobs %s ,%v", jobName, err)
return false, nil
}
if job.Status.Succeeded == 0 {
- glog.Errorf("cluster [%s] back up job is not completed, please wait! ", info.ClusterName)
+ klog.Errorf("cluster [%s] back up job is not completed, please wait! ", info.ClusterName)
return false, nil
}
@@ -2356,7 +2337,7 @@ func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, er
}
podList, err := oa.kubeCli.CoreV1().Pods(ns).List(listOptions)
if err != nil {
- glog.Errorf("failed to list pods: %v", err)
+ klog.Errorf("failed to list pods: %v", err)
return false, nil
}
@@ -2369,23 +2350,23 @@ func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, er
}
}
if podName == "" {
- glog.Errorf("failed to find the ad-hoc backup: %s podName", jobName)
+ klog.Errorf("failed to find the ad-hoc backup: %s podName", jobName)
return false, nil
}
getTsCmd := fmt.Sprintf("kubectl logs -n %s %s | grep 'commitTS = ' | cut -d '=' -f2 | sed 's/ *//g'", ns, podName)
tsData, err := exec.Command("/bin/sh", "-c", getTsCmd).CombinedOutput()
if err != nil {
- glog.Errorf("failed to get ts of pod %s, %v", podName, err)
+ klog.Errorf("failed to get ts of pod %s, %v", podName, err)
return false, nil
}
if string(tsData) == "" {
- glog.Errorf("ts is empty pod %s", podName)
+ klog.Errorf("ts is empty pod %s", podName)
return false, nil
}
ts = strings.TrimSpace(string(tsData))
- glog.Infof("ad-hoc backup ts: %s", ts)
+ klog.Infof("ad-hoc backup ts: %s", ts)
return true, nil
}
@@ -2401,7 +2382,7 @@ func (oa *operatorActions) CheckAdHocBackup(info *TidbClusterConfig) (string, er
func (oa *operatorActions) Restore(from *TidbClusterConfig, to *TidbClusterConfig) error {
oa.EmitEvent(from, fmt.Sprintf("RestoreBackup: target: %s", to.ClusterName))
oa.EmitEvent(to, fmt.Sprintf("RestoreBackup: source: %s", from.ClusterName))
- glog.Infof("deploying restore, the data is from cluster[%s/%s] to cluster[%s/%s]",
+ klog.Infof("deploying restore, the data is from cluster[%s/%s] to cluster[%s/%s]",
from.Namespace, from.ClusterName, to.Namespace, to.ClusterName)
sets := map[string]string{
@@ -2417,7 +2398,7 @@ func (oa *operatorActions) Restore(from *TidbClusterConfig, to *TidbClusterConfi
restoreName := fmt.Sprintf("%s-restore", to.ClusterName)
cmd := fmt.Sprintf("helm install -n %s --namespace %s %s --set-string %s",
restoreName, to.Namespace, oa.backupChartPath(to.OperatorTag), setString)
- glog.Infof("install restore [%s]", cmd)
+ klog.Infof("install restore [%s]", cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to launch restore job: %v, %s", err, string(res))
@@ -2427,23 +2408,23 @@ func (oa *operatorActions) Restore(from *TidbClusterConfig, to *TidbClusterConfi
}
func (oa *operatorActions) CheckRestore(from *TidbClusterConfig, to *TidbClusterConfig) error {
- glog.Infof("begin to check restore backup cluster[%s] namespace[%s]", from.ClusterName, from.Namespace)
+ klog.Infof("begin to check restore backup cluster[%s] namespace[%s]", from.ClusterName, from.Namespace)
jobName := fmt.Sprintf("%s-restore-%s", to.ClusterName, from.BackupName)
fn := func() (bool, error) {
job, err := oa.kubeCli.BatchV1().Jobs(to.Namespace).Get(jobName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get jobs %s ,%v", jobName, err)
+ klog.Errorf("failed to get jobs %s ,%v", jobName, err)
return false, nil
}
if job.Status.Succeeded == 0 {
- glog.Errorf("cluster [%s] restore job is not completed, please wait! ", to.ClusterName)
+ klog.Errorf("cluster [%s] restore job is not completed, please wait! ", to.ClusterName)
return false, nil
}
_, err = oa.DataIsTheSameAs(to, from)
if err != nil {
// ad-hoc restore don't check the data really, just logging
- glog.Infof("check restore: %v", err)
+ klog.Infof("check restore: %v", err)
}
return true, nil
@@ -2527,7 +2508,7 @@ func (oa *operatorActions) DataIsTheSameAs(tc, otherInfo *TidbClusterConfig) (bo
otherInfo.Namespace, otherInfo.ClusterName, tableName, otherCnt)
return false, err
}
- glog.Infof("cluster %s/%s's table %s count(*) = %d and cluster %s/%s's table %s count(*) = %d",
+ klog.Infof("cluster %s/%s's table %s count(*) = %d and cluster %s/%s's table %s count(*) = %d",
tc.Namespace, tc.ClusterName, tableName, cnt,
otherInfo.Namespace, otherInfo.ClusterName, tableName, otherCnt)
}
@@ -2578,7 +2559,7 @@ func releaseIsExist(err error) bool {
func (oa *operatorActions) DeployScheduledBackup(info *TidbClusterConfig) error {
oa.EmitEvent(info, "DeploySchedulerBackup")
- glog.Infof("begin to deploy scheduled backup")
+ klog.Infof("begin to deploy scheduled backup")
cron := fmt.Sprintf("'*/1 * * * *'")
sets := map[string]string{
@@ -2596,7 +2577,7 @@ func (oa *operatorActions) DeployScheduledBackup(info *TidbClusterConfig) error
return err
}
- glog.Infof("scheduled-backup deploy [%s]", cmd)
+ klog.Infof("scheduled-backup deploy [%s]", cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to launch scheduler backup job: %v, %s", err, string(res))
@@ -2605,7 +2586,7 @@ func (oa *operatorActions) DeployScheduledBackup(info *TidbClusterConfig) error
}
func (oa *operatorActions) disableScheduledBackup(info *TidbClusterConfig) error {
- glog.Infof("disabling scheduled backup")
+ klog.Infof("disabling scheduled backup")
sets := map[string]string{
"clusterName": info.ClusterName,
@@ -2617,7 +2598,7 @@ func (oa *operatorActions) disableScheduledBackup(info *TidbClusterConfig) error
return err
}
- glog.Infof("scheduled-backup disable [%s]", cmd)
+ klog.Infof("scheduled-backup disable [%s]", cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to disable scheduler backup job: %v, %s", err, string(res))
@@ -2626,19 +2607,19 @@ func (oa *operatorActions) disableScheduledBackup(info *TidbClusterConfig) error
}
func (oa *operatorActions) CheckScheduledBackup(info *TidbClusterConfig) error {
- glog.Infof("checking scheduler backup for tidb cluster[%s/%s]", info.Namespace, info.ClusterName)
+ klog.Infof("checking scheduler backup for tidb cluster[%s/%s]", info.Namespace, info.ClusterName)
jobName := fmt.Sprintf("%s-scheduled-backup", info.ClusterName)
fn := func() (bool, error) {
job, err := oa.kubeCli.BatchV1beta1().CronJobs(info.Namespace).Get(jobName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get cronjobs %s ,%v", jobName, err)
+ klog.Errorf("failed to get cronjobs %s ,%v", jobName, err)
return false, nil
}
jobs, err := oa.kubeCli.BatchV1().Jobs(info.Namespace).List(metav1.ListOptions{})
if err != nil {
- glog.Errorf("failed to list jobs %s ,%v", info.Namespace, err)
+ klog.Errorf("failed to list jobs %s ,%v", info.Namespace, err)
return false, nil
}
@@ -2650,7 +2631,7 @@ func (oa *operatorActions) CheckScheduledBackup(info *TidbClusterConfig) error {
}
if len(backupJobs) == 0 {
- glog.Errorf("cluster [%s] scheduler jobs is creating, please wait!", info.ClusterName)
+ klog.Errorf("cluster [%s] scheduler jobs is creating, please wait!", info.ClusterName)
return false, nil
}
@@ -2666,12 +2647,12 @@ func (oa *operatorActions) CheckScheduledBackup(info *TidbClusterConfig) error {
}
if succededJobCount >= 3 {
- glog.Infof("cluster [%s/%s] scheduled back up job completed count: %d",
+ klog.Infof("cluster [%s/%s] scheduled back up job completed count: %d",
info.Namespace, info.ClusterName, succededJobCount)
return true, nil
}
- glog.Infof("cluster [%s/%s] scheduled back up job is not completed, please wait! ",
+ klog.Infof("cluster [%s/%s] scheduled back up job is not completed, please wait! ",
info.Namespace, info.ClusterName)
return false, nil
}
@@ -2704,7 +2685,7 @@ func getParentUIDFromJob(j batchv1.Job) (types.UID, bool) {
}
if controllerRef.Kind != "CronJob" {
- glog.Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace)
+ klog.Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace)
return types.UID(""), false
}
@@ -2762,7 +2743,7 @@ func (oa *operatorActions) getBackupDir(info *TidbClusterConfig) ([]string, erro
_, err = oa.kubeCli.CoreV1().Pods(info.Namespace).Create(pod)
if err != nil && !errors.IsAlreadyExists(err) {
- glog.Errorf("cluster: [%s/%s] create get backup dir pod failed, error :%v", info.Namespace, info.ClusterName, err)
+ klog.Errorf("cluster: [%s/%s] create get backup dir pod failed, error :%v", info.Namespace, info.ClusterName, err)
return nil, err
}
@@ -2785,12 +2766,12 @@ func (oa *operatorActions) getBackupDir(info *TidbClusterConfig) ([]string, erro
cmd := fmt.Sprintf("kubectl exec %s -n %s ls /data", backupDirPodName, info.Namespace)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
- glog.Errorf("cluster:[%s/%s] exec :%s failed,error:%v,result:%s", info.Namespace, info.ClusterName, cmd, err, string(res))
+ klog.Errorf("cluster:[%s/%s] exec :%s failed,error:%v,result:%s", info.Namespace, info.ClusterName, cmd, err, string(res))
return nil, err
}
dirs := strings.Split(string(res), "\n")
- glog.Infof("dirs in pod info name [%s] dir name [%s]", scheduledPvcName, strings.Join(dirs, ","))
+ klog.Infof("dirs in pod info name [%s] dir name [%s]", scheduledPvcName, strings.Join(dirs, ","))
return dirs, nil
}
@@ -2805,11 +2786,11 @@ func (oa *operatorActions) DeployIncrementalBackup(from *TidbClusterConfig, to *
}
if withDrainer {
oa.EmitEvent(from, fmt.Sprintf("DeployIncrementalBackup: slave: %s", to.ClusterName))
- glog.Infof("begin to deploy incremental backup, source cluster[%s/%s], target cluster [%s/%s]",
+ klog.Infof("begin to deploy incremental backup, source cluster[%s/%s], target cluster [%s/%s]",
from.Namespace, from.ClusterName, to.Namespace, to.ClusterName)
} else {
oa.EmitEvent(from, "Enable pump cluster")
- glog.Infof("begin to enable pump for cluster[%s/%s]",
+ klog.Infof("begin to enable pump for cluster[%s/%s]",
from.Namespace, from.ClusterName)
}
@@ -2860,7 +2841,7 @@ func (oa *operatorActions) DeployIncrementalBackup(from *TidbClusterConfig, to *
if err != nil {
return err
}
- glog.Infof(cmd)
+ klog.Infof(cmd)
res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to launch incremental backup job: %v, %s", err, string(res))
@@ -2869,17 +2850,17 @@ func (oa *operatorActions) DeployIncrementalBackup(from *TidbClusterConfig, to *
}
func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withDrainer bool) error {
- glog.Infof("begin to check incremental backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace)
+ klog.Infof("begin to check incremental backup cluster[%s] namespace[%s]", info.ClusterName, info.Namespace)
pumpStatefulSetName := fmt.Sprintf("%s-pump", info.ClusterName)
fn := func() (bool, error) {
pumpStatefulSet, err := oa.kubeCli.AppsV1().StatefulSets(info.Namespace).Get(pumpStatefulSetName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get jobs %s ,%v", pumpStatefulSetName, err)
+ klog.Errorf("failed to get jobs %s ,%v", pumpStatefulSetName, err)
return false, nil
}
if pumpStatefulSet.Status.Replicas != pumpStatefulSet.Status.ReadyReplicas {
- glog.Errorf("pump replicas is not ready, please wait ! %s ", pumpStatefulSetName)
+ klog.Errorf("pump replicas is not ready, please wait ! %s ", pumpStatefulSetName)
return false, nil
}
@@ -2895,7 +2876,7 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD
pods, err := oa.kubeCli.CoreV1().Pods(info.Namespace).List(listOps)
if err != nil {
- glog.Errorf("failed to get pods via pump labels %s ,%v", pumpStatefulSetName, err)
+ klog.Errorf("failed to get pods via pump labels %s ,%v", pumpStatefulSetName, err)
return false, nil
}
@@ -2905,7 +2886,7 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD
for _, pod := range pods.Items {
if !oa.pumpHealth(info, pod.Name) {
- glog.Errorf("some pods is not health %s", pumpStatefulSetName)
+ klog.Errorf("some pods is not health %s", pumpStatefulSetName)
return false, nil
}
@@ -2913,11 +2894,11 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD
continue
}
- glog.Info(pod.Spec.Affinity)
+ klog.Info(pod.Spec.Affinity)
if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil || len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 1 {
return true, fmt.Errorf("pump pod %s/%s should have affinity set", pod.Namespace, pod.Name)
}
- glog.Info(pod.Spec.Tolerations)
+ klog.Info(pod.Spec.Tolerations)
foundKey := false
for _, tor := range pod.Spec.Tolerations {
if tor.Key == "node-role" {
@@ -2937,11 +2918,11 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD
drainerStatefulSetName := fmt.Sprintf("%s-drainer", info.ClusterName)
drainerStatefulSet, err := oa.kubeCli.AppsV1().StatefulSets(info.Namespace).Get(drainerStatefulSetName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get jobs %s ,%v", pumpStatefulSetName, err)
+ klog.Errorf("failed to get jobs %s ,%v", pumpStatefulSetName, err)
return false, nil
}
if drainerStatefulSet.Status.Replicas != drainerStatefulSet.Status.ReadyReplicas {
- glog.Errorf("drainer replicas is not ready, please wait ! %s ", pumpStatefulSetName)
+ klog.Errorf("drainer replicas is not ready, please wait ! %s ", pumpStatefulSetName)
return false, nil
}
@@ -2961,7 +2942,7 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD
}
for _, pod := range pods.Items {
if !oa.drainerHealth(info, pod.Name) {
- glog.Errorf("some pods is not health %s", drainerStatefulSetName)
+ klog.Errorf("some pods is not health %s", drainerStatefulSetName)
return false, nil
}
@@ -2969,11 +2950,11 @@ func (oa *operatorActions) CheckIncrementalBackup(info *TidbClusterConfig, withD
continue
}
- glog.Info(pod.Spec.Affinity)
+ klog.Info(pod.Spec.Affinity)
if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil || len(pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 1 {
return true, fmt.Errorf("drainer pod %s/%s should have spec.affinity set", pod.Namespace, pod.Name)
}
- glog.Info(pod.Spec.Tolerations)
+ klog.Info(pod.Spec.Tolerations)
foundKey := false
for _, tor := range pod.Spec.Tolerations {
if tor.Key == "node-role" {
@@ -3007,7 +2988,7 @@ func (oa *operatorActions) RegisterWebHookAndServiceOrDie(configName, namespace,
func (oa *operatorActions) RegisterWebHookAndService(configName, namespace, service string, context *apimachinery.CertContext) error {
client := oa.kubeCli
- glog.Infof("Registering the webhook via the AdmissionRegistration API")
+ klog.Infof("Registering the webhook via the AdmissionRegistration API")
failurePolicy := admissionV1beta1.Fail
@@ -3040,7 +3021,7 @@ func (oa *operatorActions) RegisterWebHookAndService(configName, namespace, serv
})
if err != nil {
- glog.Errorf("registering webhook config %s with namespace %s error %v", configName, namespace, err)
+ klog.Errorf("registering webhook config %s with namespace %s error %v", configName, namespace, err)
return err
}
@@ -3079,7 +3060,7 @@ func (oa *operatorActions) pumpHealth(info *TidbClusterConfig, podName string) b
if oa.fw != nil {
localHost, localPort, cancel, err := portforward.ForwardOnePort(oa.fw, info.Namespace, fmt.Sprintf("pod/%s", podName), 8250)
if err != nil {
- glog.Errorf("failed to forward port %d for %s/%s", 8250, info.Namespace, podName)
+ klog.Errorf("failed to forward port %d for %s/%s", 8250, info.Namespace, podName)
return false
}
defer cancel()
@@ -3090,27 +3071,27 @@ func (oa *operatorActions) pumpHealth(info *TidbClusterConfig, podName string) b
pumpHealthURL := fmt.Sprintf("http://%s/status", addr)
res, err := http.Get(pumpHealthURL)
if err != nil {
- glog.Errorf("cluster:[%s] call %s failed,error:%v", info.ClusterName, pumpHealthURL, err)
+ klog.Errorf("cluster:[%s] call %s failed,error:%v", info.ClusterName, pumpHealthURL, err)
return false
}
if res.StatusCode >= 400 {
- glog.Errorf("Error response %v", res.StatusCode)
+ klog.Errorf("Error response %v", res.StatusCode)
return false
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
- glog.Errorf("cluster:[%s] read response body failed,error:%v", info.ClusterName, err)
+ klog.Errorf("cluster:[%s] read response body failed,error:%v", info.ClusterName, err)
return false
}
healths := pumpStatus{}
err = json.Unmarshal(body, &healths)
if err != nil {
- glog.Errorf("cluster:[%s] unmarshal failed,error:%v", info.ClusterName, err)
+ klog.Errorf("cluster:[%s] unmarshal failed,error:%v", info.ClusterName, err)
return false
}
for _, status := range healths.StatusMap {
if status.State != "online" {
- glog.Errorf("cluster:[%s] pump's state is not online", info.ClusterName)
+ klog.Errorf("cluster:[%s] pump's state is not online", info.ClusterName)
return false
}
}
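
The probe above only inspects the per-node state field of pump's /status payload; a minimal, self-contained sketch of that decoding step (the struct shape and JSON keys here are assumptions for illustration, not the repository's actual pumpStatus type):

package main

import (
	"encoding/json"
	"fmt"
)

// nodeStatus and pumpStatus are hypothetical minimal shapes covering only
// the fields the health check reads: StatusMap[...].State.
type nodeStatus struct {
	State string `json:"state"`
}

type pumpStatus struct {
	StatusMap map[string]nodeStatus `json:"status"`
}

func main() {
	body := []byte(`{"status":{"pump-0":{"state":"online"},"pump-1":{"state":"paused"}}}`)
	var s pumpStatus
	if err := json.Unmarshal(body, &s); err != nil {
		panic(err)
	}
	for id, n := range s.StatusMap {
		// the e2e check treats anything other than "online" as unhealthy
		fmt.Printf("%s online=%t\n", id, n.State == "online")
	}
}
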
@@ -3145,30 +3126,30 @@ func (oa *operatorActions) drainerHealth(info *TidbClusterConfig, podName string
PreserveWhitespace: false,
})
if err != nil {
- glog.Errorf("failed to run command '%s' in pod %s/%q", cmd, info.Namespace, podName)
+ klog.Errorf("failed to run command '%s' in pod %s/%q", cmd, info.Namespace, podName)
return false
}
body = []byte(stdout)
} else {
res, err := http.Get(drainerHealthURL)
if err != nil {
- glog.Errorf("cluster:[%s] call %s failed,error:%v", info.ClusterName, drainerHealthURL, err)
+ klog.Errorf("cluster:[%s] call %s failed,error:%v", info.ClusterName, drainerHealthURL, err)
return false
}
if res.StatusCode >= 400 {
- glog.Errorf("Error response %v", res.StatusCode)
+ klog.Errorf("Error response %v", res.StatusCode)
return false
}
body, err = ioutil.ReadAll(res.Body)
if err != nil {
- glog.Errorf("cluster:[%s] read response body failed,error:%v", info.ClusterName, err)
+ klog.Errorf("cluster:[%s] read response body failed,error:%v", info.ClusterName, err)
return false
}
}
healths := drainerStatus{}
err = json.Unmarshal(body, &healths)
if err != nil {
- glog.Errorf("cluster:[%s] unmarshal failed,error:%v", info.ClusterName, err)
+ klog.Errorf("cluster:[%s] unmarshal failed,error:%v", info.ClusterName, err)
return false
}
return len(healths.PumpPos) > 0
@@ -3178,7 +3159,7 @@ func (oa *operatorActions) EmitEvent(info *TidbClusterConfig, message string) {
oa.lock.Lock()
defer oa.lock.Unlock()
- glog.Infof("Event: %s", message)
+ klog.Infof("Event: %s", message)
if !oa.eventWorkerRunning {
return
@@ -3215,7 +3196,7 @@ func (oa *operatorActions) RunEventWorker() {
oa.lock.Lock()
oa.eventWorkerRunning = true
oa.lock.Unlock()
- glog.Infof("Event worker started")
+ klog.Infof("Event worker started")
wait.Forever(oa.eventWorker, 10*time.Second)
}
@@ -3245,12 +3226,12 @@ func (oa *operatorActions) eventWorker() {
},
}
if err := client.AddAnnotation(anno); err != nil {
- glog.V(4).Infof("cluster:[%s/%s] error recording event: %s, reason: %v",
+ klog.V(4).Infof("cluster:[%s/%s] error recording event: %s, reason: %v",
ns, clusterName, ev.message, err)
retryEvents = append(retryEvents, ev)
continue
}
- glog.Infof("cluster: [%s/%s] recoding event: %s", ns, clusterName, ev.message)
+ klog.Infof("cluster: [%s/%s] recoding event: %s", ns, clusterName, ev.message)
}
ce := oa.clusterEvents[key]
@@ -3279,7 +3260,7 @@ func (oa *operatorActions) checkManualPauseComponent(info *TidbClusterConfig, co
fn := func() (bool, error) {
if tc, err = oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(info.ClusterName, metav1.GetOptions{}); err != nil {
- glog.Infof("failed to get tidbcluster: [%s/%s], %v", ns, info.ClusterName, err)
+ klog.Infof("failed to get tidbcluster: [%s/%s], %v", ns, info.ClusterName, err)
return false, nil
}
@@ -3289,19 +3270,19 @@ func (oa *operatorActions) checkManualPauseComponent(info *TidbClusterConfig, co
setName = controller.TiDBMemberName(info.ClusterName)
tidbPod, err := oa.kubeCli.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
- glog.Infof("fail to get pod in CheckManualPauseCompoent tidb [%s/%s]", ns, podName)
+ klog.Infof("fail to get pod in CheckManualPauseCompoent tidb [%s/%s]", ns, podName)
return false, nil
}
if tidbPod.Labels[v1.ControllerRevisionHashLabelKey] == tc.Status.TiDB.StatefulSet.UpdateRevision &&
tc.Status.TiDB.Phase == v1alpha1.UpgradePhase {
if member, ok := tc.Status.TiDB.Members[tidbPod.Name]; !ok || !member.Health {
- glog.Infof("wait for tidb pod [%s/%s] ready member health %t ok %t", ns, podName, member.Health, ok)
+ klog.Infof("wait for tidb pod [%s/%s] ready member health %t ok %t", ns, podName, member.Health, ok)
} else {
return true, nil
}
} else {
- glog.Infof("tidbset is not in upgrade phase or pod is not upgrade done [%s/%s]", ns, podName)
+ klog.Infof("tidbset is not in upgrade phase or pod is not upgrade done [%s/%s]", ns, podName)
}
return false, nil
@@ -3310,7 +3291,7 @@ func (oa *operatorActions) checkManualPauseComponent(info *TidbClusterConfig, co
setName = controller.TiKVMemberName(info.ClusterName)
tikvPod, err := oa.kubeCli.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
- glog.Infof("fail to get pod in CheckManualPauseCompoent tikv [%s/%s]", ns, podName)
+ klog.Infof("fail to get pod in CheckManualPauseCompoent tikv [%s/%s]", ns, podName)
return false, nil
}
@@ -3324,12 +3305,12 @@ func (oa *operatorActions) checkManualPauseComponent(info *TidbClusterConfig, co
}
}
if tikvStore == nil || tikvStore.State != v1alpha1.TiKVStateUp {
- glog.Infof("wait for tikv pod [%s/%s] ready store state %s", ns, podName, tikvStore.State)
+ klog.Infof("wait for tikv pod [%s/%s] ready store state %s", ns, podName, tikvStore.State)
} else {
return true, nil
}
} else {
- glog.Infof("tikvset is not in upgrade phase or pod is not upgrade done [%s/%s]", ns, podName)
+ klog.Infof("tikvset is not in upgrade phase or pod is not upgrade done [%s/%s]", ns, podName)
}
return false, nil
@@ -3362,24 +3343,24 @@ func (oa *operatorActions) CheckUpgradeComplete(info *TidbClusterConfig) error {
if err := wait.PollImmediate(15*time.Second, 30*time.Minute, func() (done bool, err error) {
tc, err := oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("checkUpgradeComplete, [%s/%s] cannot get tidbcluster, %v", ns, tcName, err)
+ klog.Errorf("checkUpgradeComplete, [%s/%s] cannot get tidbcluster, %v", ns, tcName, err)
return false, nil
}
if tc.Status.PD.Phase == v1alpha1.UpgradePhase {
- glog.Errorf("checkUpgradeComplete, [%s/%s] PD is still upgrading", ns, tcName)
+ klog.Errorf("checkUpgradeComplete, [%s/%s] PD is still upgrading", ns, tcName)
return false, nil
}
if tc.Status.TiKV.Phase == v1alpha1.UpgradePhase {
- glog.Errorf("checkUpgradeComplete, [%s/%s] TiKV is still upgrading", ns, tcName)
+ klog.Errorf("checkUpgradeComplete, [%s/%s] TiKV is still upgrading", ns, tcName)
return false, nil
}
if tc.Status.TiDB.Phase == v1alpha1.UpgradePhase {
- glog.Errorf("checkUpgradeComplete, [%s/%s] TiDB is still upgrading", ns, tcName)
+ klog.Errorf("checkUpgradeComplete, [%s/%s] TiDB is still upgrading", ns, tcName)
return false, nil
}
return true, nil
}); err != nil {
- glog.Errorf("failed to wait upgrade complete [%s/%s], %v", ns, tcName, err)
+ klog.Errorf("failed to wait upgrade complete [%s/%s], %v", ns, tcName, err)
return err
}
return nil
@@ -3407,7 +3388,7 @@ func (oa *operatorActions) CheckInitSQL(info *TidbClusterConfig) error {
return true, nil
}); err != nil {
- glog.Errorf("failed to check init sql complete [%s/%s], %v", ns, tcName, err)
+ klog.Errorf("failed to check init sql complete [%s/%s], %v", ns, tcName, err)
return err
}
return nil
@@ -3427,7 +3408,7 @@ func (oa *operatorActions) WaitForTidbClusterReady(tc *v1alpha1.TidbCluster, tim
var local *v1alpha1.TidbCluster
var err error
if local, err = oa.cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{}); err != nil {
- glog.Errorf("failed to get tidbcluster: %s/%s, %v", tc.Namespace, tc.Name, err)
+ klog.Errorf("failed to get tidbcluster: %s/%s, %v", tc.Namespace, tc.Name, err)
return false, nil
}
@@ -3484,7 +3465,7 @@ func StartValidatingAdmissionWebhookServerOrDie(context *apimachinery.CertContex
if err := server.ListenAndServeTLS("", ""); err != nil {
sendErr := slack.SendErrMsg(err.Error())
if sendErr != nil {
- glog.Error(sendErr)
+ klog.Error(sendErr)
}
panic(fmt.Sprintf("failed to start webhook server %v", err))
}
diff --git a/tests/backup.go b/tests/backup.go
index c13e0a7d1d..3e4ea7cff9 100644
--- a/tests/backup.go
+++ b/tests/backup.go
@@ -27,7 +27,7 @@ import (
"golang.org/x/sync/errgroup"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -70,13 +70,13 @@ func (oa *operatorActions) BackupAndRestoreToMultipleClusters(source *TidbCluste
err = oa.DeployAdHocBackup(source)
if err != nil {
- glog.Errorf("cluster:[%s] deploy happen error: %v", source.ClusterName, err)
+ klog.Errorf("cluster:[%s] deploy happen error: %v", source.ClusterName, err)
return err
}
ts, err := oa.CheckAdHocBackup(source)
if err != nil {
- glog.Errorf("cluster:[%s] deploy happen error: %v", source.ClusterName, err)
+ klog.Errorf("cluster:[%s] deploy happen error: %v", source.ClusterName, err)
return err
}
@@ -86,19 +86,19 @@ func (oa *operatorActions) BackupAndRestoreToMultipleClusters(source *TidbCluste
prepareIncremental := func(source *TidbClusterConfig, target BackupTarget) error {
err = oa.CheckTidbClusterStatus(target.TargetCluster)
if err != nil {
- glog.Errorf("cluster:[%s] deploy faild error: %v", target.TargetCluster.ClusterName, err)
+ klog.Errorf("cluster:[%s] deploy faild error: %v", target.TargetCluster.ClusterName, err)
return err
}
err = oa.Restore(source, target.TargetCluster)
if err != nil {
- glog.Errorf("from cluster:[%s] to cluster [%s] restore happen error: %v",
+ klog.Errorf("from cluster:[%s] to cluster [%s] restore happen error: %v",
source.ClusterName, target.TargetCluster.ClusterName, err)
return err
}
err = oa.CheckRestore(source, target.TargetCluster)
if err != nil {
- glog.Errorf("from cluster:[%s] to cluster [%s] restore failed error: %v",
+ klog.Errorf("from cluster:[%s] to cluster [%s] restore failed error: %v",
source.ClusterName, target.TargetCluster.ClusterName, err)
return err
}
@@ -158,13 +158,13 @@ func (oa *operatorActions) BackupAndRestoreToMultipleClusters(source *TidbCluste
if err != nil {
return err
}
- glog.Infof("waiting 30 seconds to insert into more records")
+ klog.Infof("waiting 30 seconds to insert into more records")
time.Sleep(30 * time.Second)
- glog.Infof("cluster[%s] stop insert data", source.ClusterName)
+ klog.Infof("cluster[%s] stop insert data", source.ClusterName)
oa.StopInsertDataTo(source)
- glog.Infof("wait on-going inserts to be drained for 60 seconds")
+ klog.Infof("wait on-going inserts to be drained for 60 seconds")
time.Sleep(60 * time.Second)
dsn, cancel, err := oa.getTiDBDSN(source.Namespace, source.ClusterName, "test", source.Password)
@@ -190,7 +190,7 @@ func (oa *operatorActions) BackupAndRestoreToMultipleClusters(source *TidbCluste
oa.BeginInsertDataToOrDie(source)
err = oa.DeployScheduledBackup(source)
if err != nil {
- glog.Errorf("cluster:[%s] scheduler happen error: %v", source.ClusterName, err)
+ klog.Errorf("cluster:[%s] scheduler happen error: %v", source.ClusterName, err)
return err
}
@@ -240,7 +240,7 @@ func (oa *operatorActions) CheckDataConsistency(from, to *TidbClusterConfig, tim
fn := func() (bool, error) {
b, err := oa.DataIsTheSameAs(to, from)
if err != nil {
- glog.Error(err)
+ klog.Error(err)
return false, nil
}
if b {
@@ -254,7 +254,7 @@ func (oa *operatorActions) CheckDataConsistency(from, to *TidbClusterConfig, tim
func (oa *operatorActions) DeployDrainer(info *DrainerConfig, source *TidbClusterConfig) error {
oa.EmitEvent(source, "DeployDrainer")
- glog.Infof("begin to deploy drainer [%s] namespace[%s], source cluster [%s]", info.DrainerName,
+ klog.Infof("begin to deploy drainer [%s] namespace[%s], source cluster [%s]", info.DrainerName,
source.Namespace, source.ClusterName)
valuesPath, err := info.BuildSubValues(oa.drainerChartPath(source.OperatorTag))
@@ -269,7 +269,7 @@ func (oa *operatorActions) DeployDrainer(info *DrainerConfig, source *TidbCluste
cmd := fmt.Sprintf("helm install %s --name %s --namespace %s --set-string %s -f %s",
oa.drainerChartPath(source.OperatorTag), info.DrainerName, source.Namespace, info.DrainerHelmString(override, source), valuesPath)
- glog.Info(cmd)
+ klog.Info(cmd)
if res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput(); err != nil {
return fmt.Errorf("failed to deploy drainer [%s/%s], %v, %s",
@@ -286,23 +286,23 @@ func (oa *operatorActions) DeployDrainerOrDie(info *DrainerConfig, source *TidbC
}
func (oa *operatorActions) CheckDrainer(info *DrainerConfig, source *TidbClusterConfig) error {
- glog.Infof("checking drainer [%s/%s]", info.DrainerName, source.Namespace)
+ klog.Infof("checking drainer [%s/%s]", info.DrainerName, source.Namespace)
ns := source.Namespace
stsName := fmt.Sprintf("%s-%s-drainer", source.ClusterName, info.DrainerName)
fn := func() (bool, error) {
sts, err := oa.kubeCli.AppsV1().StatefulSets(source.Namespace).Get(stsName, v1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get drainer StatefulSet %s ,%v", sts, err)
+ klog.Errorf("failed to get drainer StatefulSet %s ,%v", sts, err)
return false, nil
}
if *sts.Spec.Replicas != DrainerReplicas {
- glog.Infof("StatefulSet: %s/%s .spec.Replicas(%d) != %d",
+ klog.Infof("StatefulSet: %s/%s .spec.Replicas(%d) != %d",
ns, sts.Name, *sts.Spec.Replicas, DrainerReplicas)
return false, nil
}
if sts.Status.ReadyReplicas != DrainerReplicas {
- glog.Infof("StatefulSet: %s/%s .state.ReadyReplicas(%d) != %d",
+ klog.Infof("StatefulSet: %s/%s .state.ReadyReplicas(%d) != %d",
ns, sts.Name, sts.Status.ReadyReplicas, DrainerReplicas)
}
return true, nil
@@ -317,7 +317,7 @@ func (oa *operatorActions) CheckDrainer(info *DrainerConfig, source *TidbCluster
}
func (oa *operatorActions) RestoreIncrementalFiles(from *DrainerConfig, to *TidbClusterConfig, stopTSO int64) error {
- glog.Infof("restoring incremental data from drainer [%s/%s] to TiDB cluster [%s/%s]",
+ klog.Infof("restoring incremental data from drainer [%s/%s] to TiDB cluster [%s/%s]",
from.Namespace, from.DrainerName, to.Namespace, to.ClusterName)
// TODO: better incremental files restore solution
@@ -354,7 +354,7 @@ func (oa *operatorActions) RestoreIncrementalFiles(from *DrainerConfig, to *Tidb
}
cmd := buff.String()
- glog.Infof("Restore incremental data, command: \n%s", cmd)
+ klog.Infof("Restore incremental data, command: \n%s", cmd)
if res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput(); err != nil {
return fmt.Errorf("failed to restore incremental files from dainer [%s/%s] to TiDB cluster [%s/%s], %v, %s",
diff --git a/tests/cluster_info.go b/tests/cluster_info.go
index 009aeb2847..2b6a613615 100644
--- a/tests/cluster_info.go
+++ b/tests/cluster_info.go
@@ -18,7 +18,7 @@ import (
"os"
"strconv"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
func (tc *TidbClusterConfig) set(name string, value string) (string, bool) {
@@ -161,6 +161,6 @@ func (tc *TidbClusterConfig) BuildSubValues(path string) (string, error) {
if err != nil {
return "", err
}
- glog.V(4).Infof("subValues:\n %s", subValues)
+ klog.V(4).Infof("subValues:\n %s", subValues)
return subVaulesPath, nil
}
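
The glog-to-klog switch in these test helpers keeps the same V-level gating, so messages such as the V(4) subValues dump above only appear when verbosity is raised; a minimal standalone sketch of the usual klog flag wiring (generic example, not code from this repository):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog.InitFlags registers -v, -logtostderr, etc.; nil means flag.CommandLine.
	klog.InitFlags(nil)
	flag.Set("v", "4") // equivalent to running the binary with -v=4
	flag.Parse()
	defer klog.Flush()

	klog.Info("always printed")
	klog.V(4).Info("printed only when verbosity >= 4, like the subValues dump")
}
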
diff --git a/tests/cmd/fault-trigger/main.go b/tests/cmd/fault-trigger/main.go
index 1e36dc4cb8..c5a1d1e9bf 100644
--- a/tests/cmd/fault-trigger/main.go
+++ b/tests/cmd/fault-trigger/main.go
@@ -24,7 +24,7 @@ import (
"github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/component-base/logs"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
var (
@@ -52,5 +52,5 @@ func main() {
server.StartServer()
}, 5*time.Second)
- glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", pprofPort), nil))
+ klog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", pprofPort), nil))
}
diff --git a/tests/cmd/stability/main.go b/tests/cmd/stability/main.go
index 567e193b3c..0f4c6d2018 100644
--- a/tests/cmd/stability/main.go
+++ b/tests/cmd/stability/main.go
@@ -31,7 +31,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/component-base/logs"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
var cfg *tests.Config
@@ -46,7 +46,7 @@ func main() {
logs.InitLogs()
defer logs.FlushLogs()
go func() {
- glog.Info(http.ListenAndServe(":6060", nil))
+ klog.Info(http.ListenAndServe(":6060", nil))
}()
metrics.StartServer()
cfg = tests.ParseConfigOrDie()
@@ -124,6 +124,7 @@ func run() {
oa.CleanOperatorOrDie(ocfg)
oa.DeployOperatorOrDie(ocfg)
+ klog.Infof(fmt.Sprintf("allclusters: %v", allClusters))
for _, cluster := range allClusters {
oa.CleanTidbClusterOrDie(cluster)
}
@@ -146,9 +147,9 @@ func run() {
for _, cluster := range clusters {
oa.CheckTidbClusterStatusOrDie(cluster)
oa.CheckDisasterToleranceOrDie(cluster)
- go oa.BeginInsertDataToOrDie(cluster)
+ oa.BeginInsertDataToOrDie(cluster)
}
-
+ klog.Infof("clusters deployed and checked")
// scale out
for _, cluster := range clusters {
cluster.ScaleTiDB(3).ScaleTiKV(5).ScalePD(5)
@@ -158,6 +159,7 @@ func run() {
oa.CheckTidbClusterStatusOrDie(cluster)
oa.CheckDisasterToleranceOrDie(cluster)
}
+ klog.Infof("clusters scale out and checked")
// scale in
for _, cluster := range clusters {
@@ -168,19 +170,19 @@ func run() {
oa.CheckTidbClusterStatusOrDie(cluster)
oa.CheckDisasterToleranceOrDie(cluster)
}
+ klog.Infof("clusters scale in and checked")
// upgrade
namespace := os.Getenv("NAMESPACE")
oa.RegisterWebHookAndServiceOrDie(ocfg.WebhookConfigName, namespace, ocfg.WebhookServiceName, certCtx)
ctx, cancel := context.WithCancel(context.Background())
for _, cluster := range clusters {
- assignedNodes := oa.GetTidbMemberAssignedNodesOrDie(cluster)
cluster.UpgradeAll(upgradeVersion)
oa.UpgradeTidbClusterOrDie(cluster)
oa.CheckUpgradeOrDie(ctx, cluster)
oa.CheckTidbClusterStatusOrDie(cluster)
- oa.CheckTidbMemberAssignedNodesOrDie(cluster, assignedNodes)
}
+ klog.Infof("clusters upgraded in checked")
// configuration change
for _, cluster := range clusters {
@@ -210,10 +212,12 @@ func run() {
}
cancel()
oa.CleanWebHookAndServiceOrDie(ocfg.WebhookConfigName)
+ klog.Infof("clusters configurations updated in checked")
for _, cluster := range clusters {
oa.CheckDisasterToleranceOrDie(cluster)
}
+ klog.Infof("clusters DisasterTolerance checked")
// backup and restore
for i := range backupTargets {
@@ -222,11 +226,13 @@ func run() {
oa.CheckTidbClusterStatusOrDie(backupTargets[i].TargetCluster)
}
oa.BackupAndRestoreToMultipleClustersOrDie(clusters[0], backupTargets)
+ klog.Infof("clusters backup and restore checked")
// delete operator
oa.CleanOperatorOrDie(ocfg)
oa.CheckOperatorDownOrDie(deployedClusters)
oa.DeployOperatorOrDie(ocfg)
+ klog.Infof("clusters operator deleted and redeployed, checked")
// stop node
physicalNode, node, faultTime := fta.StopNodeOrDie()
@@ -236,16 +242,20 @@ func run() {
time.Sleep(3 * time.Minute)
fta.StartNodeOrDie(physicalNode, node)
oa.EmitEvent(nil, fmt.Sprintf("StartNode: %s on %s", node, physicalNode))
+ oa.WaitPodOnNodeReadyOrDie(deployedClusters, node)
oa.CheckRecoverOrDie(deployedClusters)
for _, cluster := range deployedClusters {
oa.CheckTidbClusterStatusOrDie(cluster)
}
+ klog.Infof("clusters node stopped and restarted, checked")
// truncate tikv sst file
oa.TruncateSSTFileThenCheckFailoverOrDie(clusters[0], 5*time.Minute)
+ klog.Infof("clusters truncate sst file and checked failover")
// delete pd data
oa.DeletePDDataThenCheckFailoverOrDie(clusters[0], 5*time.Minute)
+ klog.Infof("cluster[%s/%s] DeletePDDataThenCheckFailoverOrDie success", clusters[0].Namespace, clusters[0].ClusterName)
// stop one etcd
faultEtcd := tests.SelectNode(cfg.ETCDs)
@@ -254,23 +264,27 @@ func run() {
time.Sleep(3 * time.Minute)
oa.CheckEtcdDownOrDie(ocfg, deployedClusters, faultEtcd)
fta.StartETCDOrDie(faultEtcd)
+ klog.Infof("clusters stop on etcd and restart")
// stop all etcds
fta.StopETCDOrDie()
time.Sleep(10 * time.Minute)
fta.StartETCDOrDie()
oa.CheckEtcdDownOrDie(ocfg, deployedClusters, "")
+ klog.Infof("clusters stop all etcd and restart")
// stop all kubelets
fta.StopKubeletOrDie()
time.Sleep(10 * time.Minute)
fta.StartKubeletOrDie()
oa.CheckKubeletDownOrDie(ocfg, deployedClusters, "")
+ klog.Infof("clusters stop all kubelets and restart")
// stop all kube-proxy and k8s/operator/tidbcluster is available
fta.StopKubeProxyOrDie()
oa.CheckKubeProxyDownOrDie(ocfg, clusters)
fta.StartKubeProxyOrDie()
+ klog.Infof("clusters stop all kube-proxy and restart")
// stop all kube-scheduler pods
for _, physicalNode := range cfg.APIServers {
@@ -284,6 +298,7 @@ func run() {
fta.StartKubeSchedulerOrDie(vNode.IP)
}
}
+ klog.Infof("clusters stop all kube-scheduler and restart")
// stop all kube-controller-manager pods
for _, physicalNode := range cfg.APIServers {
@@ -297,14 +312,17 @@ func run() {
fta.StartKubeControllerManagerOrDie(vNode.IP)
}
}
+ klog.Infof("clusters stop all kube-controller and restart")
// stop one kube-apiserver pod
faultApiServer := tests.SelectNode(cfg.APIServers)
+ klog.Infof("fault ApiServer Node name = %s", faultApiServer)
fta.StopKubeAPIServerOrDie(faultApiServer)
defer fta.StartKubeAPIServerOrDie(faultApiServer)
time.Sleep(3 * time.Minute)
oa.CheckOneApiserverDownOrDie(ocfg, clusters, faultApiServer)
fta.StartKubeAPIServerOrDie(faultApiServer)
+ klog.Infof("clusters stop one kube-apiserver and restart")
time.Sleep(time.Minute)
// stop all kube-apiserver pods
@@ -319,6 +337,7 @@ func run() {
fta.StartKubeAPIServerOrDie(vNode.IP)
}
}
+ klog.Infof("clusters stop all kube-apiserver and restart")
time.Sleep(time.Minute)
}
@@ -381,7 +400,7 @@ func run() {
}
slack.SuccessCount++
- glog.Infof("################## Stability test finished at: %v\n\n\n\n", time.Now().Format(time.RFC3339))
+ klog.Infof("################## Stability test finished at: %v\n\n\n\n", time.Now().Format(time.RFC3339))
}
func newOperatorConfig() *tests.OperatorConfig {
@@ -442,6 +461,7 @@ func newTidbClusterConfig(ns, clusterName string) *tests.TidbClusterConfig {
"discovery.image": cfg.OperatorImage,
"tikv.defaultcfBlockCacheSize": "8GB",
"tikv.writecfBlockCacheSize": "2GB",
+ "pvReclaimPolicy": "Delete",
},
Args: map[string]string{
"binlog.drainer.workerCount": "1024",
diff --git a/tests/config.go b/tests/config.go
index 38fb686bc8..9dd70dd8b6 100644
--- a/tests/config.go
+++ b/tests/config.go
@@ -21,12 +21,11 @@ import (
"os"
"strings"
- "github.com/pingcap/tidb-operator/tests/slack"
-
+ utiloperator "github.com/pingcap/tidb-operator/tests/e2e/util/operator"
"github.com/pingcap/tidb-operator/tests/pkg/blockwriter"
-
+ "github.com/pingcap/tidb-operator/tests/slack"
"gopkg.in/yaml.v2"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -44,6 +43,7 @@ type Config struct {
InstallOperator bool `yaml:"install_opeartor" json:"install_opeartor"`
OperatorTag string `yaml:"operator_tag" json:"operator_tag"`
OperatorImage string `yaml:"operator_image" json:"operator_image"`
+ BackupImage string `yaml:"backup_image" json:"backup_image"`
OperatorFeatures map[string]bool `yaml:"operator_features" json:"operator_features"`
UpgradeOperatorTag string `yaml:"upgrade_operator_tag" json:"upgrade_operator_tag"`
UpgradeOperatorImage string `yaml:"upgrade_operator_image" json:"upgrade_operator_image"`
@@ -76,6 +76,8 @@ type Config struct {
E2EImage string `yaml:"e2e_image" json:"e2e_image"`
PreloadImages bool `yaml:"preload_images" json:"preload_images"`
+
+ OperatorKiller utiloperator.OperatorKillerConfig
}
// Nodes defines a series of nodes that belong to the same physical node.
@@ -158,7 +160,7 @@ func ParseConfigOrDie() *Config {
slack.NotifyAndPanic(err)
}
- glog.Infof("using config: %+v", cfg)
+ klog.Infof("using config: %+v", cfg)
return cfg
}
diff --git a/tests/drainer_info.go b/tests/drainer_info.go
index 8ad4f87c5e..7b072f2be7 100644
--- a/tests/drainer_info.go
+++ b/tests/drainer_info.go
@@ -18,7 +18,7 @@ import (
"io/ioutil"
"strings"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
type DbType string
@@ -72,6 +72,6 @@ func (d *DrainerConfig) BuildSubValues(dir string) (string, error) {
if err := ioutil.WriteFile(path, []byte(values), 0644); err != nil {
return "", err
}
- glog.Infof("Values of drainer %s:\n %s", d.DrainerName, values)
+ klog.Infof("Values of drainer %s:\n %s", d.DrainerName, values)
return path, nil
}
diff --git a/tests/dt.go b/tests/dt.go
index 39eb51701a..5ecf247cad 100644
--- a/tests/dt.go
+++ b/tests/dt.go
@@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -66,14 +66,14 @@ func (oa *operatorActions) LabelNodes() error {
err := wait.PollImmediate(3*time.Second, time.Minute, func() (bool, error) {
n, err := oa.kubeCli.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
- glog.Errorf("get node:[%s] failed! error: %v", node.Name, err)
+ klog.Errorf("get node:[%s] failed! error: %v", node.Name, err)
return false, nil
}
index := i % RackNum
n.Labels[RackLabel] = fmt.Sprintf("rack%d", index)
_, err = oa.kubeCli.CoreV1().Nodes().Update(n)
if err != nil {
- glog.Errorf("label node:[%s] failed! error: %v", node.Name, err)
+ klog.Errorf("label node:[%s] failed! error: %v", node.Name, err)
return false, nil
}
return true, nil
diff --git a/tests/e2e/config/config.go b/tests/e2e/config/config.go
index a931a82392..a037d122aa 100644
--- a/tests/e2e/config/config.go
+++ b/tests/e2e/config/config.go
@@ -17,6 +17,7 @@ import (
"flag"
"fmt"
"io/ioutil"
+ "time"
"github.com/pingcap/tidb-operator/tests"
v1 "k8s.io/api/core/v1"
@@ -45,6 +46,10 @@ func RegisterTiDBOperatorFlags(flags *flag.FlagSet) {
flags.StringVar(&TestConfig.OperatorRepoUrl, "operator-repo-url", "https://github.com/pingcap/tidb-operator.git", "tidb-operator repo url used")
flags.StringVar(&TestConfig.ChartDir, "chart-dir", "", "chart dir")
flags.BoolVar(&TestConfig.PreloadImages, "preload-images", false, "if set, preload images in the bootstrap of e2e process")
+ flags.StringVar(&TestConfig.BackupImage, "backup-image", "", "backup image")
+ flags.BoolVar(&TestConfig.OperatorKiller.Enabled, "operator-killer", false, "whether to enable operator kill")
+ flags.DurationVar(&TestConfig.OperatorKiller.Interval, "operator-killer-interval", 5*time.Minute, "interval between operator kills")
+ flags.Float64Var(&TestConfig.OperatorKiller.JitterFactor, "operator-killer-jitter-factor", 1, "factor used to jitter operator kills")
}
func AfterReadingAllFlags() error {
@@ -104,6 +109,7 @@ func NewDefaultOperatorConfig(cfg *tests.Config) *tests.OperatorConfig {
StsWebhookEnabled: true,
PodWebhookEnabled: false,
Cabundle: "",
+ BackupImage: cfg.BackupImage,
}
}
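
The new operator-killer flags (enable switch, interval, jitter factor) map naturally onto the apimachinery wait helpers; a hedged sketch of how an interval plus jitter factor is typically consumed — killOnce is a hypothetical stand-in for whatever deletes a tidb-operator pod, not the actual OperatorKiller implementation:

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
)

// killOnce is a placeholder for deleting one randomly chosen operator pod.
func killOnce() {
	klog.Info("killing one tidb-operator pod")
}

func main() {
	stopCh := make(chan struct{})
	// With period=5m and jitterFactor=1, each wait is a random duration in
	// [5m, 10m), matching the defaults of the new e2e flags.
	go wait.JitterUntil(killOnce, 5*time.Minute, 1.0, true, stopCh)

	time.Sleep(30 * time.Minute)
	close(stopCh) // the e2e suite closes its stop channel in SynchronizedAfterSuite
}
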
diff --git a/tests/e2e/e2e.go b/tests/e2e/e2e.go
index 95346946a3..202d13c0d5 100644
--- a/tests/e2e/e2e.go
+++ b/tests/e2e/e2e.go
@@ -34,18 +34,34 @@ import (
"github.com/pingcap/tidb-operator/tests"
e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config"
utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image"
+ utilnode "github.com/pingcap/tidb-operator/tests/e2e/util/node"
+ utiloperator "github.com/pingcap/tidb-operator/tests/e2e/util/operator"
v1 "k8s.io/api/core/v1"
+ storagev1 "k8s.io/api/storage/v1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/component-base/logs"
"k8s.io/klog"
aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
+ storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+
+ // ensure auth plugins are loaded
+ _ "k8s.io/client-go/plugin/pkg/client/auth"
+
+ // ensure that cloud providers are loaded
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
+ _ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
+)
+
+var (
+ operatorKillerStopCh chan struct{}
)
// This is modified from framework.SetupSuite().
@@ -65,16 +81,20 @@ func setupSuite() {
// Delete any namespaces except those created by the system. This ensures no
// lingering resources are left over from a previous test run.
if framework.TestContext.CleanStart {
- deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
- []string{
- metav1.NamespaceSystem,
- metav1.NamespaceDefault,
- metav1.NamespacePublic,
- v1.NamespaceNodeLease,
- // kind local path provisioner namespace since 0.7.0
- // https://github.com/kubernetes-sigs/kind/blob/v0.7.0/pkg/build/node/storage.go#L35
- "local-path-storage",
- })
+ reservedNamespaces := []string{
+ metav1.NamespaceSystem,
+ metav1.NamespaceDefault,
+ metav1.NamespacePublic,
+ v1.NamespaceNodeLease,
+ }
+ if framework.TestContext.Provider == "kind" {
+ // kind local path provisioner namespace since 0.7.0
+ // https://github.com/kubernetes-sigs/kind/blob/v0.7.0/pkg/build/node/storage.go#L35
+ reservedNamespaces = append(reservedNamespaces, "local-path-storage")
+ } else if framework.TestContext.Provider == "openshift" {
+ reservedNamespaces = append(reservedNamespaces, "openshift")
+ }
+ deleted, err := framework.DeleteNamespaces(c, nil, reservedNamespaces)
if err != nil {
e2elog.Failf("Error deleting orphaned namespaces: %v", err)
}
@@ -113,6 +133,47 @@ func setupSuite() {
e2elog.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}
+ ginkgo.By("Initializing all nodes")
+ nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
+ framework.ExpectNoError(err)
+ for _, node := range nodeList.Items {
+ framework.Logf("Initializing node %q", node.Name)
+ framework.ExpectNoError(utilnode.InitNode(&node))
+ }
+
+ // When the default storage class of GKE/EKS (aws) is used, network attached
+ // storage would be provisioned and we would have to clean it up later.
+ // We set local-storage class as default for simplicity.
+ // The default storage class of kind is local-path-provisioner which
+ // consumes local storage like local-volume-provisioner.
+ if framework.TestContext.Provider == "gke" || framework.TestContext.Provider == "aws" {
+ defaultSCName := "local-storage"
+ list, err := c.StorageV1().StorageClasses().List(metav1.ListOptions{})
+ framework.ExpectNoError(err)
+ // only one storage class can be marked default
+ // https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/#changing-the-default-storageclass
+ var localStorageSC *storagev1.StorageClass
+ for i, sc := range list.Items {
+ if sc.Name == defaultSCName {
+ localStorageSC = &list.Items[i]
+ } else if storageutil.IsDefaultAnnotation(sc.ObjectMeta) {
+ delete(sc.ObjectMeta.Annotations, storageutil.IsDefaultStorageClassAnnotation)
+ _, err = c.StorageV1().StorageClasses().Update(&sc)
+ framework.ExpectNoError(err)
+ }
+ }
+ if localStorageSC == nil {
+ e2elog.Fail("local-storage storage class not found")
+ }
+ if localStorageSC.Annotations == nil {
+ localStorageSC.Annotations = map[string]string{}
+ }
+ localStorageSC.Annotations[storageutil.IsDefaultStorageClassAnnotation] = "true"
+ e2elog.Logf("Setting %q as the default storage class", localStorageSC.Name)
+ _, err = c.StorageV1().StorageClasses().Update(localStorageSC)
+ framework.ExpectNoError(err)
+ }
+
// Log the version of the server and this client.
e2elog.Logf("e2e test version: %s", version.Get().GitVersion)
@@ -133,13 +194,13 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
if err := exec.Command("sh", "-c", helmClearCmd).Run(); err != nil {
framework.Failf("failed to clear helm releases (cmd: %q, error: %v", helmClearCmd, err)
}
- ginkgo.By("Clear non-kubernetes apiservices")
- clearNonK8SAPIServicesCmd := "kubectl delete apiservices -l kube-aggregator.kubernetes.io/automanaged!=onstart"
- if err := exec.Command("sh", "-c", clearNonK8SAPIServicesCmd).Run(); err != nil {
- framework.Failf("failed to clear non-kubernetes apiservices (cmd: %q, error: %v", clearNonK8SAPIServicesCmd, err)
+ ginkgo.By("Clear tidb-operator apiservices")
+ clearAPIServicesCmd := "kubectl delete apiservices -l app.kubernetes.io/name=tidb-operator"
+ if err := exec.Command("sh", "-c", clearAPIServicesCmd).Run(); err != nil {
+ framework.Failf("failed to clear non-kubernetes apiservices (cmd: %q, error: %v", clearAPIServicesCmd, err)
}
- ginkgo.By("Clear validatingwebhookconfigurations")
- clearValidatingWebhookConfigurationsCmd := "kubectl delete validatingwebhookconfiguration --all"
+ ginkgo.By("Clear tidb-operator validatingwebhookconfigurations")
+ clearValidatingWebhookConfigurationsCmd := "kubectl delete validatingwebhookconfiguration -l app.kubernetes.io/name=tidb-operator"
if err := exec.Command("sh", "-c", clearValidatingWebhookConfigurationsCmd).Run(); err != nil {
framework.Failf("failed to clear validatingwebhookconfigurations (cmd: %q, error: %v", clearValidatingWebhookConfigurationsCmd, err)
}
@@ -172,6 +233,9 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
pvList, err := kubeCli.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list pvList")
for _, pv := range pvList.Items {
+ if pv.Spec.StorageClassName != "local-storage" {
+ continue
+ }
if pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
continue
}
@@ -180,13 +244,16 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
_, err = kubeCli.CoreV1().PersistentVolumes().Update(&pv)
framework.ExpectNoError(err, fmt.Sprintf("failed to update pv %s", pv.Name))
}
- ginkgo.By("Wait for all PVs to be available")
+ ginkgo.By("Wait for all local PVs to be available")
err = wait.Poll(time.Second, time.Minute, func() (bool, error) {
pvList, err := kubeCli.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, pv := range pvList.Items {
+ if pv.Spec.StorageClassName != "local-storage" {
+ continue
+ }
if pv.Status.Phase != v1.VolumeAvailable {
return false, nil
}
@@ -205,6 +272,21 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
ginkgo.By("Installing tidb-operator")
oa.CleanOperatorOrDie(ocfg)
oa.DeployOperatorOrDie(ocfg)
+ if e2econfig.TestConfig.OperatorKiller.Enabled {
+ operatorKiller := utiloperator.NewOperatorKiller(e2econfig.TestConfig.OperatorKiller, kubeCli, func() ([]v1.Pod, error) {
+ podList, err := kubeCli.CoreV1().Pods(ocfg.Namespace).List(metav1.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ "app.kubernetes.io/name": "tidb-operator",
+ }).String(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ return podList.Items, nil
+ })
+ operatorKillerStopCh = make(chan struct{}) // assign the package-level channel so AfterSuite can close it
+ go operatorKiller.Run(operatorKillerStopCh)
+ }
} else {
ginkgo.By("Skip installing tidb-operator")
}
@@ -218,6 +300,9 @@ var _ = ginkgo.SynchronizedAfterSuite(func() {
framework.CleanupSuite()
}, func() {
framework.AfterSuiteActions()
+ if operatorKillerStopCh != nil {
+ close(operatorKillerStopCh)
+ }
})
// RunE2ETests checks configuration parameters (specified through flags) and then runs
@@ -232,6 +317,11 @@ func RunE2ETests(t *testing.T) {
gomega.RegisterFailHandler(e2elog.Fail)
+ // Disable serial and stability tests by default unless they are explicitly requested.
+ if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
+ config.GinkgoConfig.SkipString = `\[Stability\]|\[Serial\]`
+ }
+
// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
var r []ginkgo.Reporter
if framework.TestContext.ReportDir != "" {
diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go
index 15f2546fe1..41f4c52b9a 100644
--- a/tests/e2e/e2e_test.go
+++ b/tests/e2e/e2e_test.go
@@ -22,6 +22,7 @@ import (
"time"
e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config"
+ "k8s.io/klog"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -42,10 +43,23 @@ func handleFlags() {
flag.Parse()
}
+func init() {
+ framework.RegisterProvider("kind", func() (framework.ProviderInterface, error) {
+ return framework.NullProvider{}, nil
+ })
+ framework.RegisterProvider("openshift", func() (framework.ProviderInterface, error) {
+ return framework.NullProvider{}, nil
+ })
+}
+
func TestMain(m *testing.M) {
// Register test flags, then parse flags.
handleFlags()
+ flag.CommandLine.VisitAll(func(flag *flag.Flag) {
+ klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+ })
+
// Now that we know which Viper config (if any) was chosen,
// parse it and update those options which weren't already set via command line flags
// (which have higher priority).
diff --git a/tests/e2e/tidbcluster/serial.go b/tests/e2e/tidbcluster/serial.go
index cd9b900cc2..5c35dc548c 100644
--- a/tests/e2e/tidbcluster/serial.go
+++ b/tests/e2e/tidbcluster/serial.go
@@ -18,6 +18,7 @@ import (
"encoding/json"
"fmt"
_ "net/http/pprof"
+ "strconv"
"time"
"github.com/onsi/ginkgo"
@@ -29,15 +30,18 @@ import (
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/scheme"
+ "github.com/pingcap/tidb-operator/pkg/util"
"github.com/pingcap/tidb-operator/tests"
e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config"
utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image"
utilpod "github.com/pingcap/tidb-operator/tests/e2e/util/pod"
"github.com/pingcap/tidb-operator/tests/e2e/util/portforward"
+ "github.com/pingcap/tidb-operator/tests/e2e/util/proxiedpdclient"
utilstatefulset "github.com/pingcap/tidb-operator/tests/e2e/util/statefulset"
"github.com/pingcap/tidb-operator/tests/pkg/fixture"
v1 "k8s.io/api/core/v1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
@@ -61,6 +65,7 @@ func mustToString(set sets.Int32) string {
return string(b)
}
+// Serial specs describe tests which cannot run in parallel.
var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
f := framework.NewDefaultFramework("serial")
@@ -106,15 +111,118 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
}
})
+ ginkgo.Context("tidb-operator with default values", func() {
+ var ocfg *tests.OperatorConfig
+ var oa tests.OperatorActions
+ var genericCli client.Client
+
+ ginkgo.BeforeEach(func() {
+ ocfg = &tests.OperatorConfig{
+ Namespace: ns,
+ ReleaseName: "operator",
+ Image: cfg.OperatorImage,
+ Tag: cfg.OperatorTag,
+ }
+ oa = tests.NewOperatorActions(cli, c, asCli, aggrCli, apiExtCli, tests.DefaultPollInterval, ocfg, e2econfig.TestConfig, nil, fw, f)
+ ginkgo.By("Installing CRDs")
+ oa.CleanCRDOrDie()
+ oa.InstallCRDOrDie(ocfg)
+ ginkgo.By("Installing tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ oa.DeployOperatorOrDie(ocfg)
+ var err error
+ genericCli, err = client.New(config, client.Options{Scheme: scheme.Scheme})
+ framework.ExpectNoError(err, "failed to create clientset")
+ })
+
+ ginkgo.AfterEach(func() {
+ ginkgo.By("Uninstall tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ ginkgo.By("Uninstalling CRDs")
+ oa.CleanCRDOrDie()
+ })
+
+ // There is no guarantee but tidb pods should be assigned back to
+ // previous nodes if no other pods to occupy the positions.
+ // See docs/design-proposals/tidb-stable-scheduling.md
+ ginkgo.It("[Feature: StableScheduling] TiDB pods should be scheduled to preivous nodes", func() {
+ clusterName := "tidb-scheduling"
+ tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV3Version)
+ tc.Spec.PD.Replicas = 1
+ tc.Spec.TiKV.Replicas = 1
+ tc.Spec.TiDB.Replicas = 3
+ err := genericCli.Create(context.TODO(), tc)
+ framework.ExpectNoError(err)
+ err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
+
+ listOptions := metav1.ListOptions{
+ LabelSelector: labels.SelectorFromSet(
+ label.New().Instance(clusterName).Component(label.TiDBLabelVal).Labels()).String(),
+ }
+ oldPodList, err := c.CoreV1().Pods(ns).List(listOptions)
+ framework.ExpectNoError(err)
+
+ ginkgo.By("Update tidb configuration")
+ updateStrategy := v1alpha1.ConfigUpdateStrategyRollingUpdate
+ err = controller.GuaranteedUpdate(genericCli, tc, func() error {
+ tc.Spec.TiDB.Config.TokenLimit = func(i uint) *uint {
+ return &i
+ }(2000)
+ tc.Spec.TiDB.ConfigUpdateStrategy = &updateStrategy
+ return nil
+ })
+ framework.ExpectNoError(err)
+
+ ginkgo.By("Waiting for all tidb pods are recreated and assigned to the same node")
+ getOldPodByName := func(pod *v1.Pod) *v1.Pod {
+ for _, oldPod := range oldPodList.Items {
+ if oldPod.Name == pod.Name {
+ return &oldPod
+ }
+ }
+ return nil
+ }
+ err = wait.PollImmediate(time.Second*5, time.Minute*15, func() (bool, error) {
+ newPodList, err := c.CoreV1().Pods(ns).List(listOptions)
+ if err != nil && !apierrors.IsNotFound(err) {
+ return false, err
+ }
+ if apierrors.IsNotFound(err) {
+ return false, nil
+ }
+ if len(newPodList.Items) != len(oldPodList.Items) {
+ return false, nil
+ }
+ for _, newPod := range newPodList.Items {
+ oldPod := getOldPodByName(&newPod)
+ if oldPod == nil {
+ return false, fmt.Errorf("found an unexpected pod: %q", newPod.Name)
+ }
+ if oldPod.UID == newPod.UID {
+ // not recreated yet
+ return false, nil
+ }
+ if oldPod.Spec.NodeName != newPod.Spec.NodeName {
+ // recreated but assigned to another node
+ return false, fmt.Errorf("pod %q recreated but not assigned to previous node %q, got %q", oldPod.Name, oldPod.Spec.NodeName, newPod.Spec.NodeName)
+ }
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err)
+ })
+ })
+
// tidb-operator with AdvancedStatefulSet feature enabled
- ginkgo.Context("[Feature: AdvancedStatefulSet]", func() {
+ ginkgo.Context("[Feature: AdvancedStatefulSet][Feature: Webhook]", func() {
var ocfg *tests.OperatorConfig
var oa tests.OperatorActions
var genericCli client.Client
ginkgo.BeforeEach(func() {
ocfg = &tests.OperatorConfig{
- Namespace: "pingcap",
+ Namespace: ns,
ReleaseName: "operator",
Image: cfg.OperatorImage,
Tag: cfg.OperatorTag,
@@ -123,9 +231,12 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
"StableScheduling=true",
"AdvancedStatefulSet=true",
},
- LogLevel: "4",
- ImagePullPolicy: v1.PullIfNotPresent,
- TestMode: true,
+ LogLevel: "4",
+ ImagePullPolicy: v1.PullIfNotPresent,
+ TestMode: true,
+ WebhookEnabled: true,
+ PodWebhookEnabled: true,
+ StsWebhookEnabled: false,
}
oa = tests.NewOperatorActions(cli, c, asCli, aggrCli, apiExtCli, tests.DefaultPollInterval, ocfg, e2econfig.TestConfig, nil, fw, f)
ginkgo.By("Installing CRDs")
@@ -147,13 +258,15 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
})
ginkgo.It("Scaling tidb cluster with advanced statefulset", func() {
- clusterName := "deploy"
- cluster := newTidbClusterConfig(e2econfig.TestConfig, ns, clusterName, "", "")
- cluster.Resources["pd.replicas"] = "3"
- cluster.Resources["tikv.replicas"] = "5"
- cluster.Resources["tidb.replicas"] = "3"
- oa.DeployTidbClusterOrDie(&cluster)
- oa.CheckTidbClusterStatusOrDie(&cluster)
+ clusterName := "scaling-with-asts"
+ tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV3Version)
+ tc.Spec.PD.Replicas = 3
+ tc.Spec.TiKV.Replicas = 5
+ tc.Spec.TiDB.Replicas = 5
+ err := genericCli.Create(context.TODO(), tc)
+ framework.ExpectNoError(err)
+ err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
scalingTests := []struct {
name string
@@ -257,14 +370,14 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for all pods of tidb cluster component %s (sts: %s/%s) are in desired state (replicas: %d, delete slots: %v)", st.component, ns, stsName, st.replicas, st.deleteSlots.List()))
- err = wait.PollImmediate(time.Second*5, time.Minute*10, func() (bool, error) {
+ err = wait.PollImmediate(time.Second*5, time.Minute*15, func() (bool, error) {
// check replicas and delete slots are synced
sts, err = hc.AppsV1().StatefulSets(ns).Get(stsName, metav1.GetOptions{})
if err != nil {
return false, nil
}
if *sts.Spec.Replicas != st.replicas {
- klog.Infof("replicas of sts %s/%s is %d, expects %d", ns, stsName, sts.Spec.Replicas, st.replicas)
+ klog.Infof("replicas of sts %s/%s is %d, expects %d", ns, stsName, *sts.Spec.Replicas, st.replicas)
return false, nil
}
if !helper.GetDeleteSlots(sts).Equal(st.deleteSlots) {
@@ -289,7 +402,8 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
}
}
- oa.CheckTidbClusterStatusOrDie(&cluster)
+ err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
})
})
@@ -299,7 +413,7 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
var genericCli client.Client
ocfg = &tests.OperatorConfig{
- Namespace: "pingcap",
+ Namespace: ns,
ReleaseName: "operator",
Image: cfg.OperatorImage,
Tag: cfg.OperatorTag,
@@ -347,7 +461,7 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
e2elog.Failf("at least 3 statefulsets must be created, got %d", len(stsList.Items))
}
- podListBeforeUpgrade, err := c.CoreV1().Pods(tc.Namespace).List(metav1.ListOptions{})
+ podListBeforeUpgrade, err := c.CoreV1().Pods(tc.Namespace).List(listOption)
framework.ExpectNoError(err)
ginkgo.By("Upgrading tidb-operator with AdvancedStatefulSet feature")
@@ -380,9 +494,9 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
})
framework.ExpectNoError(err)
- ginkgo.By("Make sure pods are not affected")
- err = utilpod.WaitForPodsAreNotAffected(c, podListBeforeUpgrade.Items, time.Minute*3)
- framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pods was not affeteced after the operator is upgraded")
+ ginkgo.By("Make sure pods are not changed")
+ err = utilpod.WaitForPodsAreChanged(c, podListBeforeUpgrade.Items, time.Minute*3)
+ framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pods are changed after the operator is upgraded")
})
// tidb-operator with pod admission webhook enabled
@@ -393,7 +507,7 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
ginkgo.BeforeEach(func() {
ocfg = &tests.OperatorConfig{
- Namespace: "pingcap",
+ Namespace: ns,
ReleaseName: "operator",
Image: cfg.OperatorImage,
Tag: cfg.OperatorTag,
@@ -447,7 +561,7 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
ginkgo.BeforeEach(func() {
ocfg = &tests.OperatorConfig{
- Namespace: "pingcap",
+ Namespace: ns,
ReleaseName: "operator",
Image: cfg.OperatorImage,
Tag: cfg.OperatorTag,
@@ -584,9 +698,6 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
if empty, err := gomega.BeEmpty().Match(newTC.Spec.TiDB.BaseImage); empty {
e2elog.Failf("Expected tidb.baseImage has default value set, %v", err)
}
- if isNil, err := gomega.BeNil().Match(newTC.Spec.TiDB.Config); isNil {
- e2elog.Failf("Expected tidb.config has default value set, %v", err)
- }
ginkgo.By("Validating should reject illegal update")
newTC.Labels = map[string]string{
@@ -595,12 +706,437 @@ var _ = ginkgo.Describe("[tidb-operator][Serial]", func() {
_, err = cli.PingcapV1alpha1().TidbClusters(ns).Update(newTC)
framework.ExpectError(err, "Could not set instance label with value other than cluster name")
- newTC.Spec.PD.Config.Replication = &v1alpha1.PDReplicationConfig{
- MaxReplicas: func() *uint64 { i := uint64(5); return &i }(),
+ newTC.Spec.PD.Config = &v1alpha1.PDConfig{
+ Replication: &v1alpha1.PDReplicationConfig{
+ MaxReplicas: func() *uint64 { i := uint64(5); return &i }(),
+ },
}
_, err = cli.PingcapV1alpha1().TidbClusters(ns).Update(newTC)
framework.ExpectError(err, "PD replication config is immutable through CR")
})
})
+ ginkgo.Context("[Feature: AutoScaling]", func() {
+ var ocfg *tests.OperatorConfig
+ var oa tests.OperatorActions
+
+ ginkgo.BeforeEach(func() {
+ ocfg = &tests.OperatorConfig{
+ Namespace: ns,
+ ReleaseName: "operator",
+ Image: cfg.OperatorImage,
+ Tag: cfg.OperatorTag,
+ LogLevel: "4",
+ TestMode: true,
+ WebhookEnabled: true,
+ PodWebhookEnabled: true,
+ Features: []string{
+ "AutoScaling=true",
+ },
+ }
+ oa = tests.NewOperatorActions(cli, c, asCli, aggrCli, apiExtCli, tests.DefaultPollInterval, ocfg, e2econfig.TestConfig, nil, fw, f)
+ ginkgo.By("Installing CRDs")
+ oa.CleanCRDOrDie()
+ oa.InstallCRDOrDie(ocfg)
+ ginkgo.By("Installing tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ oa.DeployOperatorOrDie(ocfg)
+ })
+
+ ginkgo.AfterEach(func() {
+ ginkgo.By("Uninstall tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ ginkgo.By("Uninstalling CRDs")
+ oa.CleanCRDOrDie()
+ })
+
+ ginkgo.It("auto-scaling TidbCluster", func() {
+ clusterName := "auto-scaling"
+ tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV3Version)
+ tc.Spec.PD.Replicas = 1
+ tc.Spec.TiKV.Replicas = 3
+ tc.Spec.TiDB.Replicas = 2
+ _, err := cli.PingcapV1alpha1().TidbClusters(ns).Create(tc)
+ framework.ExpectNoError(err, "Create TidbCluster error")
+ err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err, "Check TidbCluster error")
+ monitor := fixture.NewTidbMonitor("monitor", ns, tc, false, false)
+ _, err = cli.PingcapV1alpha1().TidbMonitors(ns).Create(monitor)
+ framework.ExpectNoError(err, "Create TidbMonitor error")
+ err = tests.CheckTidbMonitor(monitor, c, fw)
+ framework.ExpectNoError(err, "Check TidbMonitor error")
+ tac := fixture.GetTidbClusterAutoScaler("auto-scaler", ns, tc, monitor)
+
+ // TODO: we should mock the tidbmonitor metrics data to check the metrics calculation.
+ // Currently these steps are covered by unit tests.
+ // For now, we set minReplicas equal to maxReplicas to force the auto-scaling to run.
+
+ // Scale TiKV to 4 replicas and check
+ tac.Spec.TiKV = &v1alpha1.TikvAutoScalerSpec{
+ BasicAutoScalerSpec: v1alpha1.BasicAutoScalerSpec{
+ MaxReplicas: 4,
+ MinReplicas: pointer.Int32Ptr(4),
+ },
+ }
+ _, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Create(tac)
+ framework.ExpectNoError(err, "Create TidbMonitorClusterAutoScaler error")
+ pdClient, cancel, err := proxiedpdclient.NewProxiedPDClient(c, fw, ns, clusterName, false, nil)
+ framework.ExpectNoError(err, "create pdapi error")
+ defer cancel()
+ var firstScaleTimestamp int64
+ err = wait.Poll(10*time.Second, 10*time.Minute, func() (done bool, err error) {
+ tc, err := cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ // check replicas
+ if tc.Spec.TiKV.Replicas != int32(4) {
+ klog.Infof("tikv haven't auto-scale to 4 replicas")
+ return false, nil
+ }
+ if len(tc.Status.TiKV.Stores) != 4 {
+ klog.Infof("tikv's stores haven't auto-scale to 4")
+ return false, nil
+ }
+ // check annotations
+ if tc.Annotations == nil || len(tc.Annotations) < 1 {
+ klog.Infof("tc haven't marked any annotation")
+ return false, nil
+ }
+ tac, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Get(tac.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tac.Annotations == nil || len(tac.Annotations) < 1 {
+ klog.Infof("tac haven't marked any annotation")
+ return false, nil
+ }
+ v, ok := tac.Annotations[label.AnnTiKVLastAutoScalingTimestamp]
+ if !ok {
+ klog.Infof("tac haven't marked any annotation")
+ return false, nil
+ }
+ firstScaleTimestamp, err = strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return false, err
+ }
+ // check store label
+ storeId := ""
+ for k, v := range tc.Status.TiKV.Stores {
+ if v.PodName == util.GetPodName(tc, v1alpha1.TiKVMemberType, int32(3)) {
+ storeId = k
+ break
+ }
+ }
+ if storeId == "" {
+ return false, nil
+ }
+ sid, err := strconv.ParseUint(storeId, 10, 64)
+ if err != nil {
+ return false, err
+ }
+ info, err := pdClient.GetStore(sid)
+ if err != nil {
+ return false, nil
+ }
+ for _, label := range info.Store.Labels {
+ if label.Key == "specialUse" && label.Value == "hotRegion" {
+ return true, nil
+ }
+ }
+ klog.Infof("tikv auto-scale out haven't find the special label")
+ return false, nil
+ })
+ framework.ExpectNoError(err, "check tikv auto-scale to 4 error")
+ klog.Info("success to check tikv auto scale-out to 4 replicas")
+
+ // Scale TiKV to 3 replicas and check
+ tac, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Get(tac.Name, metav1.GetOptions{})
+ framework.ExpectNoError(err, "Get TidbCluster AutoScaler err")
+ tac.Spec.TiKV = &v1alpha1.TikvAutoScalerSpec{
+ BasicAutoScalerSpec: v1alpha1.BasicAutoScalerSpec{
+ MaxReplicas: 3,
+ MinReplicas: pointer.Int32Ptr(3),
+ ScaleInIntervalSeconds: pointer.Int32Ptr(100),
+ },
+ }
+ _, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Update(tac)
+ framework.ExpectNoError(err, "Update TidbMonitorClusterAutoScaler error")
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ tc, err = cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tc.Spec.TiKV.Replicas != 3 {
+ klog.Info("tikv haven't auto-scale to 3 replicas")
+ return false, nil
+ }
+ if len(tc.Status.TiKV.Stores) != 3 {
+ klog.Info("tikv's store haven't auto-scale to 3")
+ return false, nil
+ }
+ if tc.Annotations != nil && len(tc.Annotations) > 0 {
+ _, ok := tc.Annotations[label.AnnTiKVAutoScalingOutOrdinals]
+ if ok {
+ klog.Infof("tikv auto-scale out annotation still exists")
+ return false, nil
+ }
+ }
+ tac, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Get(tac.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tac.Annotations == nil || len(tac.Annotations) < 1 {
+ klog.Infof("tc haven't marked any annotation")
+ return false, nil
+ }
+ v, ok := tac.Annotations[label.AnnTiKVLastAutoScalingTimestamp]
+ if !ok {
+ klog.Infof("tac haven't marked any annotation")
+ return false, nil
+ }
+ secondTs, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return false, err
+ }
+ if secondTs == firstScaleTimestamp {
+ klog.Info("tikv haven't scale yet")
+ return false, nil
+ }
+ if secondTs-firstScaleTimestamp < 100 {
+ return false, fmt.Errorf("tikv second scale's interval isn't meeting the interval requirement")
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err, "check tikv auto-scale to 3 error")
+ klog.Info("success to check tikv auto scale-in to 3 replicas")
+
+ // Scale TiDB to 3 replicas and check
+ tac, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Get(tac.Name, metav1.GetOptions{})
+ framework.ExpectNoError(err, "Get TidbCluster AutoScaler err")
+ tac.Spec.TiKV = nil
+ tac.Spec.TiDB = &v1alpha1.TidbAutoScalerSpec{
+ BasicAutoScalerSpec: v1alpha1.BasicAutoScalerSpec{
+ MaxReplicas: 3,
+ MinReplicas: pointer.Int32Ptr(3),
+ },
+ }
+ _, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Update(tac)
+ framework.ExpectNoError(err, "Update TidbMonitorClusterAutoScaler error")
+
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ tc, err = cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tc.Spec.TiDB.Replicas != 3 {
+ klog.Info("tidb haven't auto-scaler to 3 replicas")
+ return false, nil
+ }
+ tac, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Get(tac.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tac.Annotations == nil || len(tac.Annotations) < 1 {
+ klog.Info("tac haven't marked any annotations")
+ return false, nil
+ }
+ v, ok := tac.Annotations[label.AnnTiDBLastAutoScalingTimestamp]
+ if !ok {
+ klog.Info("tac haven't marked tidb auto-scaler timstamp annotation")
+ return false, nil
+ }
+ firstScaleTimestamp, err = strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err, "check tidb auto-scale to 3 error")
+ klog.Infof("success to check tidb auto scale-out to 3 replicas")
+
+ // Scale TiDB to 2 replicas and check
+ tac, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Get(tac.Name, metav1.GetOptions{})
+ framework.ExpectNoError(err, "Get TidbCluster AutoScaler err")
+ tac.Spec.TiKV = nil
+ tac.Spec.TiDB = &v1alpha1.TidbAutoScalerSpec{
+ BasicAutoScalerSpec: v1alpha1.BasicAutoScalerSpec{
+ MaxReplicas: 2,
+ MinReplicas: pointer.Int32Ptr(2),
+ ScaleInIntervalSeconds: pointer.Int32Ptr(100),
+ },
+ }
+ _, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Update(tac)
+ framework.ExpectNoError(err, "Update TidbMonitorClusterAutoScaler error")
+
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ tc, err = cli.PingcapV1alpha1().TidbClusters(tc.Namespace).Get(tc.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tc.Spec.TiDB.Replicas != 2 {
+ klog.Info("tidb haven't auto-scaler to 2 replicas")
+ return false, nil
+ }
+ tac, err = cli.PingcapV1alpha1().TidbClusterAutoScalers(ns).Get(tac.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tac.Annotations == nil || len(tac.Annotations) < 1 {
+ klog.Info("tac haven't marked any annotations")
+ return false, nil
+ }
+ v, ok := tac.Annotations[label.AnnTiDBLastAutoScalingTimestamp]
+ if !ok {
+ klog.Info("tac haven't marked tidb auto-scale timestamp")
+ return false, nil
+ }
+ secondTs, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return false, err
+ }
+ if secondTs == firstScaleTimestamp {
+ klog.Info("tidb haven't scale yet")
+ return false, nil
+ }
+ if secondTs-firstScaleTimestamp < 100 {
+ return false, fmt.Errorf("tikv second scale's interval isn't meeting the interval requirement")
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err, "check tidb auto-scale to 2 error")
+ klog.Info("success to check auto scale-in tidb to 2 replicas")
+ })
+ })
+
+ ginkgo.Context("[Verify: Upgrading Operator from 1.0.6", func() {
+ var oa tests.OperatorActions
+ var ocfg *tests.OperatorConfig
+ var version string
+
+ ginkgo.BeforeEach(func() {
+ version = "v1.0.6"
+ ocfg = &tests.OperatorConfig{
+ Namespace: ns,
+ ReleaseName: "operator",
+ Tag: version,
+ Image: fmt.Sprintf("pingcap/tidb-operator:%s", version),
+ }
+ oa = tests.NewOperatorActions(cli, c, asCli, aggrCli, apiExtCli, tests.DefaultPollInterval, ocfg, e2econfig.TestConfig, nil, fw, f)
+ ginkgo.By("Installing CRDs")
+ oa.CleanCRDOrDie()
+ tests.DeployReleasedCRDOrDie(version)
+ ginkgo.By("Installing tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ oa.DeployOperatorOrDie(ocfg)
+ })
+
+ ginkgo.AfterEach(func() {
+ ginkgo.By("Uninstall tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ ginkgo.By("Uninstalling CRDs")
+ tests.CleanReleasedCRDOrDie(version)
+ })
+
+ ginkgo.It("Deploy TidbCluster and Upgrade Operator", func() {
+ tcName := "tidbcluster"
+ cluster := newTidbClusterConfig(e2econfig.TestConfig, ns, tcName, "", "")
+ cluster.Resources["pd.replicas"] = "3"
+ cluster.Resources["tikv.replicas"] = "3"
+ cluster.Resources["tidb.replicas"] = "2"
+ cluster.Monitor = false
+ cluster.OperatorTag = version
+ oa.DeployTidbClusterOrDie(&cluster)
+ oa.CheckTidbClusterStatusOrDie(&cluster)
+
+ getPods := func(ls string) ([]v1.Pod, error) {
+ listOptions := metav1.ListOptions{
+ LabelSelector: ls,
+ }
+ podList, err := c.CoreV1().Pods(ns).List(listOptions)
+ if err != nil {
+ return nil, err
+ }
+ return podList.Items, nil
+ }
+
+ tc, err := cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{})
+ framework.ExpectNoError(err, "failed to get tc")
+
+ pdPods, err := getPods(labels.SelectorFromSet(label.New().Instance(tcName).PD().Labels()).String())
+ framework.ExpectNoError(err, "failed to get pd pods")
+
+ tikvPods, err := getPods(labels.SelectorFromSet(label.New().Instance(tcName).TiKV().Labels()).String())
+ framework.ExpectNoError(err, "failed to get tikv pods")
+
+ tidbPods, err := getPods(labels.SelectorFromSet(label.New().Instance(tcName).TiDB().Labels()).String())
+ framework.ExpectNoError(err, "failed to get tidb pods")
+
+ // Upgrade CRD / Operator to current version
+ ocfg.Tag = cfg.OperatorTag
+ ocfg.Image = cfg.OperatorImage
+ oa.InstallCRDOrDie(ocfg)
+ oa.UpgradeOperatorOrDie(ocfg)
+
+ // confirm that the tidb component has been changed
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ newTc, err := cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ // wait tidb to be updated
+ if tc.Status.TiDB.StatefulSet.CurrentRevision == newTc.Status.TiDB.StatefulSet.CurrentRevision {
+ klog.Info("wait tidb to be updated")
+ return false, nil
+ }
+ // wait tidb finish updating
+ if newTc.Status.TiDB.StatefulSet.CurrentRevision != newTc.Status.TiDB.StatefulSet.UpdateRevision {
+ klog.Info("wait tidb finish updating")
+ return false, nil
+ }
+
+ // confirm the tidb pods have been changed
+ changed, err := utilpod.PodsAreChanged(c, tidbPods)()
+ if changed {
+ klog.Infof("confirmed that tidb pods have been changed")
+ } else {
+ if err != nil {
+ klog.Errorf("error verifying tidb pods: %v", err)
+ return false, nil
+ }
+ if !changed {
+ return false, fmt.Errorf("tidb pods should be updated after the operator upgrade")
+ }
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err, "Failed to check Tidb Status After Upgrading Operator")
+
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ // confirm the pd pods haven't been changed
+ changed, err := utilpod.PodsAreChanged(c, pdPods)()
+ if err != nil {
+ klog.Errorf("error verifying pd pods: %v", err)
+ return true, nil
+ }
+ if changed {
+ return true, nil
+ }
+ klog.Infof("pd pods haven't been changed so far")
+
+ // confirm the tikv pods haven't been changed
+ changed, err = utilpod.PodsAreChanged(c, tikvPods)()
+ if err != nil {
+ klog.Errorf("error verifying tikv pods: %v", err)
+ return true, nil
+ }
+ if changed {
+ return true, nil
+ }
+ klog.Infof("tikv pods haven't been changed so far")
+ return false, nil
+ })
+ framework.ExpectError(err, "expect tikv and pd haven't been changed for 5 minutes")
+ })
+ })
})
diff --git a/tests/e2e/tidbcluster/stability.go b/tests/e2e/tidbcluster/stability.go
new file mode 100644
index 0000000000..e2dc42c175
--- /dev/null
+++ b/tests/e2e/tidbcluster/stability.go
@@ -0,0 +1,488 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tidbcluster
+
+import (
+ "context"
+ "fmt"
+ _ "net/http/pprof"
+ "time"
+
+ "github.com/onsi/ginkgo"
+ "github.com/onsi/gomega"
+ asclientset "github.com/pingcap/advanced-statefulset/pkg/client/clientset/versioned"
+ "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
+ "github.com/pingcap/tidb-operator/pkg/label"
+ "github.com/pingcap/tidb-operator/pkg/scheme"
+ "github.com/pingcap/tidb-operator/tests"
+ e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config"
+ utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image"
+ utilnode "github.com/pingcap/tidb-operator/tests/e2e/util/node"
+ utilpod "github.com/pingcap/tidb-operator/tests/e2e/util/pod"
+ "github.com/pingcap/tidb-operator/tests/e2e/util/portforward"
+ "github.com/pingcap/tidb-operator/tests/e2e/util/proxiedpdclient"
+ utiltidb "github.com/pingcap/tidb-operator/tests/e2e/util/tidb"
+ utiltikv "github.com/pingcap/tidb-operator/tests/e2e/util/tikv"
+ "github.com/pingcap/tidb-operator/tests/pkg/fixture"
+ v1 "k8s.io/api/core/v1"
+ apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/wait"
+ clientset "k8s.io/client-go/kubernetes"
+ restclient "k8s.io/client-go/rest"
+ aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
+ "k8s.io/kubernetes/test/e2e/framework"
+ e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Stability specs describe tests that involve disruptive operations, e.g.
+// stopping kubelet, killing nodes, or emptying pd/tikv data.
+// Like serial tests, they cannot run in parallel either.
+var _ = ginkgo.Describe("[tidb-operator][Stability]", func() {
+ f := framework.NewDefaultFramework("stability")
+
+ var ns string
+ var c clientset.Interface
+ var cli versioned.Interface
+ var asCli asclientset.Interface
+ var aggrCli aggregatorclient.Interface
+ var apiExtCli apiextensionsclientset.Interface
+ var cfg *tests.Config
+ var config *restclient.Config
+ var fw portforward.PortForward
+ var fwCancel context.CancelFunc
+
+ ginkgo.BeforeEach(func() {
+ ns = f.Namespace.Name
+ c = f.ClientSet
+ var err error
+ config, err = framework.LoadConfig()
+ framework.ExpectNoError(err, "failed to load config")
+ cli, err = versioned.NewForConfig(config)
+ framework.ExpectNoError(err, "failed to create clientset")
+ asCli, err = asclientset.NewForConfig(config)
+ framework.ExpectNoError(err, "failed to create clientset")
+ aggrCli, err = aggregatorclient.NewForConfig(config)
+ framework.ExpectNoError(err, "failed to create clientset")
+ apiExtCli, err = apiextensionsclientset.NewForConfig(config)
+ framework.ExpectNoError(err, "failed to create clientset")
+ clientRawConfig, err := e2econfig.LoadClientRawConfig()
+ framework.ExpectNoError(err, "failed to load raw config")
+ ctx, cancel := context.WithCancel(context.Background())
+ fw, err = portforward.NewPortForwarder(ctx, e2econfig.NewSimpleRESTClientGetter(clientRawConfig))
+ framework.ExpectNoError(err, "failed to create port forwarder")
+ fwCancel = cancel
+ cfg = e2econfig.TestConfig
+ })
+
+ ginkgo.AfterEach(func() {
+ if fwCancel != nil {
+ fwCancel()
+ }
+ })
+
+ ginkgo.Context("operator with default values", func() {
+ var ocfg *tests.OperatorConfig
+ var oa tests.OperatorActions
+ var genericCli client.Client
+
+ ginkgo.BeforeEach(func() {
+ ocfg = &tests.OperatorConfig{
+ Namespace: ns,
+ ReleaseName: "operator",
+ Image: cfg.OperatorImage,
+ Tag: cfg.OperatorTag,
+ LogLevel: "4",
+ TestMode: true,
+ }
+ oa = tests.NewOperatorActions(cli, c, asCli, aggrCli, apiExtCli, tests.DefaultPollInterval, ocfg, e2econfig.TestConfig, nil, fw, f)
+ ginkgo.By("Installing CRDs")
+ oa.CleanCRDOrDie()
+ oa.InstallCRDOrDie(ocfg)
+ ginkgo.By("Installing tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ oa.DeployOperatorOrDie(ocfg)
+ var err error
+ genericCli, err = client.New(config, client.Options{Scheme: scheme.Scheme})
+ framework.ExpectNoError(err, "failed to create clientset")
+ })
+
+ ginkgo.AfterEach(func() {
+ ginkgo.By("Uninstall tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ })
+
+ testCases := []struct {
+ name string
+ fn func()
+ }{
+ {
+ name: "tidb-operator does not exist",
+ fn: func() {
+ ginkgo.By("Uninstall tidb-operator")
+ oa.CleanOperatorOrDie(ocfg)
+ },
+ },
+ }
+
+ for _, test := range testCases {
+ ginkgo.It("tidb cluster should not be affected while "+test.name, func() {
+ clusterName := "test"
+ tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV3Version)
+ err := genericCli.Create(context.TODO(), tc)
+ framework.ExpectNoError(err)
+ err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
+
+ test.fn()
+
+ ginkgo.By("Check tidb cluster is not affected")
+ listOptions := metav1.ListOptions{
+ LabelSelector: labels.SelectorFromSet(label.New().Instance(clusterName).Labels()).String(),
+ }
+ podList, err := c.CoreV1().Pods(ns).List(listOptions)
+ framework.ExpectNoError(err)
+ err = wait.PollImmediate(time.Second*30, time.Minute*5, func() (bool, error) {
+ var ok bool
+ var err error
+ framework.Logf("check whether pods of cluster %q are changed", clusterName)
+ ok, err = utilpod.PodsAreChanged(c, podList.Items)()
+ if ok || err != nil {
+ // pod changed or some error happened
+ return true, err
+ }
+ framework.Logf("check whether pods of cluster %q are running", clusterName)
+ newPodList, err := c.CoreV1().Pods(ns).List(listOptions)
+ if err != nil {
+ return false, err
+ }
+ for _, pod := range newPodList.Items {
+ if pod.Status.Phase != v1.PodRunning {
+ return false, fmt.Errorf("pod %s/%s is not running", pod.Namespace, pod.Name)
+ }
+ }
+ framework.Logf("check whehter tidb cluster %q is connectable", clusterName)
+ ok, err = utiltidb.TiDBIsConnectable(fw, ns, clusterName, "root", "")()
+ if !ok || err != nil {
+ // not connectable or some error happened
+ return true, err
+ }
+ return false, nil
+ })
+ framework.ExpectEqual(err, wait.ErrWaitTimeout, "TiDB cluster is not affeteced")
+ })
+ }
+
+ // In this test, we demonstrate and verify the recovery process when a
+ // node (and local storage on it) is permanently gone.
+ //
+ // In cloud, a node can be deleted manually or reclaimed by a
+ // controller (e.g. auto scaling group if ReplaceUnhealthy not
+ // suspended). Local storage on it will be permanently inaccessible.
+ // Manual intervention is required to recover from this situation.
+ // Basic steps will be:
+ //
+ // - for TiKV, delete associated store ID in PD
+ // - because we use network identity as store address, if we want to
+ // recover in place, we should delete the previous store at the same
+ // address. This requires us to set it to tombstone directly because
+ // the data is permanently lost; there is no way to delete it gracefully.
+ // - optionally, Advanced StatefulSet can be used to recover with
+ // different network identity
+ // - for PD, like TiKV we must delete its member from the cluster
+ // - (EKS only) delete pvcs of failed pods
+ // - in EKS, failed pods on deleted node will be recreated because
+ // the node object is gone too (old pods are recycled by pod GC). But
+ // the newly created pods will be stuck in Pending state because
+ // associated PVs are invalid now. Pods will be recreated by
+ // tidb-operator again when we delete associated PVCs. New PVCs will
+ // be created by statefulset controller and pods will be scheduled to
+ // feasible nodes.
+ // - it's highly recommended to enable `setPVOwnerRef` in
+ // local-volume-provisioner, so that orphan PVs will be garbage
+ // collected and will not cause problems even if the name of the deleted
+ // node is used again in the future.
+ // - (GKE only, fixed path) nothing needs to be done
+ // - Because the node name does not change, old PVs can be used. Note
+ // that `setPVOwnerRef` cannot be enabled because the node object
+ // could get deleted if it takes too long for the instance to
+ // recreate.
+ // - Optionally, you can delete failed pods so they restart sooner
+ // instead of waiting out the exponential crash loop back-off.
+ // - (GKE only, unique paths) delete failed pods and associated PVCs/PVs
+ // - This is because even if the node name does not change, old PVs
+ // are invalid because unique volume paths are used. We must delete
+ // them all and wait for Kubernetes to recreate and run them again.
+ // - PVs must be deleted because the PVs are invalid and should not
+ // exist anymore. We can configure `setPVOwnerRef` to clean unused
+ // PVs when the node object is deleted, but the node object will not
+ // get deleted if the instance is recreated soon.
+ //
+ // Note that:
+ // - We assume local storage is used, otherwise PV can be re-attached
+ // to the new node without problem.
+ // - PD and TiKV must have at least 3 replicas, otherwise one node
+ // deletion will cause permanent data loss and the cluster will be unrecoverable.
+ // - Of course, this process can be automated by implementing a
+ // controller integrated with cloud providers. It's outside the scope
+ // of tidb-operator now.
+ // - The same process applies in bare-metal environments too when a
+ // machine or local storage is permanently gone.
+ //
+ // Differences between EKS and GKE:
+ //
+ // - In EKS, a new node object with different name will be created for
+ // the new machine.
+ // - In GKE (1.11+), the node object is no longer recreated on
+ // upgrade/repair even though the underlying instance is recreated and
+ // local disks are wiped. However, the node object could get deleted by
+ // cloud-controller-manager if it takes too long for the instance to
+ // recreate.
+ //
+ // Related issues:
+ // - https://github.com/pingcap/tidb-operator/issues/1546
+ // - https://github.com/pingcap/tidb-operator/issues/408
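Editor's note (not part of this diff): the test that follows exercises the recovery steps above end to end. For reference, here is a condensed Go sketch of the same manual flow, assuming the imports already present in this file (clientset, versioned, v1alpha1, utiltikv, metav1, apierrors); the helper name, its signature, and the `pdRecoveryClient` interface are hypothetical stand-ins for the proxied PD client used by the test.

```go
// pdRecoveryClient is a local stand-in for the proxied PD client; the
// method signatures are assumptions based on how the test uses it.
type pdRecoveryClient interface {
	SetStoreState(storeID uint64, state string) error
	DeleteMember(name string) error
}

// recoverFromNodeLoss condenses the manual steps described above: tombstone
// the lost TiKV stores, remove the lost PD members, and (EKS-style) delete
// PVCs whose local PVs no longer exist.
func recoverFromNodeLoss(c clientset.Interface, cli versioned.Interface, pdClient pdRecoveryClient,
	ns, clusterName string, tikvPods, pdPods []v1.Pod, pvcNames []string) error {
	for _, pod := range tikvPods {
		// The store address is the pod's network identity and its data is gone,
		// so the store can only be tombstoned, not deleted gracefully.
		storeID, err := utiltikv.GetStoreIDByPodName(cli, ns, clusterName, pod.Name)
		if err != nil {
			return err
		}
		if err := pdClient.SetStoreState(storeID, v1alpha1.TiKVStateTombstone); err != nil {
			return err
		}
	}
	for _, pod := range pdPods {
		// PD members that lived on the lost node must be removed from the cluster.
		if err := pdClient.DeleteMember(pod.Name); err != nil {
			return err
		}
	}
	for _, name := range pvcNames {
		// Deleting the PVCs lets the statefulset controller recreate pods with
		// fresh volumes on healthy nodes (EKS-style recovery).
		if err := c.CoreV1().PersistentVolumeClaims(ns).Delete(name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
```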
+ ginkgo.It("recover tidb cluster from node deletion", func() {
+ supportedProviders := sets.NewString("aws", "gke")
+ if !supportedProviders.Has(framework.TestContext.Provider) {
+ framework.Skipf("current provider is not supported list %v, skipping", supportedProviders.List())
+ }
+
+ ginkgo.By("Wait for all nodes are schedulable")
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
+
+ ginkgo.By("Make sure we have at least 3 schedulable nodes")
+ nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+ gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">=", 3))
+
+ ginkgo.By("Deploy a test cluster with 3 pd and tikv replicas")
+ clusterName := "test"
+ tc := fixture.GetTidbCluster(ns, clusterName, utilimage.TiDBV3Version)
+ tc.Spec.PD.Replicas = 3
+ tc.Spec.PD.MaxFailoverCount = pointer.Int32Ptr(0)
+ tc.Spec.TiDB.Replicas = 1
+ tc.Spec.TiDB.MaxFailoverCount = pointer.Int32Ptr(0)
+ tc.Spec.TiKV.Replicas = 3
+ tc.Spec.TiKV.MaxFailoverCount = pointer.Int32Ptr(0)
+ err := genericCli.Create(context.TODO(), tc)
+ framework.ExpectNoError(err)
+ err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
+
+ ginkgo.By("By using tidb-scheduler, 3 TiKV/PD replicas should be on different nodes")
+ allNodes := make(map[string]v1.Node)
+ for _, node := range nodeList.Items {
+ allNodes[node.Name] = node
+ }
+ allTiKVNodes := make(map[string]v1.Node)
+ allPDNodes := make(map[string]v1.Node)
+ listOptions := metav1.ListOptions{
+ LabelSelector: labels.SelectorFromSet(label.New().Instance(clusterName).Labels()).String(),
+ }
+ podList, err := c.CoreV1().Pods(ns).List(listOptions)
+ framework.ExpectNoError(err)
+ for _, pod := range podList.Items {
+ if v, ok := pod.Labels[label.ComponentLabelKey]; !ok {
+ framework.Failf("pod %s/%s does not have component label key %q", pod.Namespace, pod.Name, label.ComponentLabelKey)
+ } else if v == label.PDLabelVal {
+ allPDNodes[pod.Name] = allNodes[pod.Spec.NodeName]
+ } else if v == label.TiKVLabelVal {
+ allTiKVNodes[pod.Name] = allNodes[pod.Spec.NodeName]
+ } else {
+ continue
+ }
+ }
+ gomega.Expect(len(allPDNodes)).To(gomega.BeNumerically("==", 3), "the number of pd nodes should be 3")
+ gomega.Expect(len(allTiKVNodes)).To(gomega.BeNumerically("==", 3), "the number of tikv nodes should be 3")
+
+ ginkgo.By("Deleting a node")
+ var nodeToDelete *v1.Node
+ for _, node := range allTiKVNodes {
+ if nodeToDelete == nil {
+ nodeToDelete = &node
+ break
+ }
+ }
+ gomega.Expect(nodeToDelete).NotTo(gomega.BeNil())
+ var pdPodsOnDeletedNode []v1.Pod
+ var tikvPodsOnDeletedNode []v1.Pod
+ var pvcNamesOnDeletedNode []string
+ for _, pod := range podList.Items {
+ if pod.Spec.NodeName == nodeToDelete.Name {
+ if v, ok := pod.Labels[label.ComponentLabelKey]; ok {
+ if v == label.PDLabelVal {
+ pdPodsOnDeletedNode = append(pdPodsOnDeletedNode, pod)
+ } else if v == label.TiKVLabelVal {
+ tikvPodsOnDeletedNode = append(tikvPodsOnDeletedNode, pod)
+ }
+ }
+ for _, volume := range pod.Spec.Volumes {
+ if volume.PersistentVolumeClaim != nil {
+ pvcNamesOnDeletedNode = append(pvcNamesOnDeletedNode, volume.PersistentVolumeClaim.ClaimName)
+ }
+ }
+ }
+ }
+ gomega.Expect(len(tikvPodsOnDeletedNode)).To(gomega.BeNumerically(">=", 1), "the number of affected tikv pods must be at least 1")
+ err = framework.DeleteNodeOnCloudProvider(nodeToDelete)
+ framework.ExpectNoError(err, fmt.Sprintf("failed to delete node %q", nodeToDelete.Name))
+ framework.Logf("Node %q deleted", nodeToDelete.Name)
+
+ if framework.TestContext.Provider == "aws" {
+ // The node object will be gone with physical machine.
+ ginkgo.By(fmt.Sprintf("[AWS/EKS] Wait for the node object %q to be deleted", nodeToDelete.Name))
+ err = wait.PollImmediate(time.Second*5, time.Minute*5, func() (bool, error) {
+ _, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
+ if err == nil || !apierrors.IsNotFound(err) {
+ return false, nil
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err)
+
+ ginkgo.By("[AWS/EKS] New instance will be created and join the cluster")
+ _, err := e2enode.CheckReady(c, len(nodeList.Items), 5*time.Minute)
+ framework.ExpectNoError(err)
+
+ ginkgo.By("[AWS/EKS] Initialize newly created node")
+ nodeList, err = c.CoreV1().Nodes().List(metav1.ListOptions{})
+ framework.ExpectNoError(err)
+ initialized := 0
+ for _, node := range nodeList.Items {
+ if _, ok := allNodes[node.Name]; !ok {
+ framework.ExpectNoError(utilnode.InitNode(&node))
+ initialized++
+ }
+ }
+ gomega.Expect(initialized).To(gomega.BeNumerically("==", 1), "must have a node initialized")
+ } else if framework.TestContext.Provider == "gke" {
+ instanceIDAnn := "container.googleapis.com/instance_id"
+ oldInstanceID, ok := nodeToDelete.Annotations[instanceIDAnn]
+ if !ok {
+ framework.Failf("instance label %q not found on node object %q", instanceIDAnn, nodeToDelete.Name)
+ }
+
+ ginkgo.By("[GCP/GKE] Wait for instance ID to be updated")
+ err = wait.PollImmediate(time.Second*5, time.Minute*10, func() (bool, error) {
+ node, err := c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ instanceID, ok := node.Annotations[instanceIDAnn]
+ if !ok {
+ return false, nil
+ }
+ if instanceID == oldInstanceID {
+ return false, nil
+ }
+ framework.Logf("instance ID of node %q changed from %q to %q", nodeToDelete.Name, oldInstanceID, instanceID)
+ return true, nil
+ })
+ framework.ExpectNoError(err)
+
+ ginkgo.By("[GCP/GKE] Wait for the node to be ready")
+ e2enode.WaitForNodeToBeReady(c, nodeToDelete.Name, time.Minute*5)
+
+ ginkgo.By(fmt.Sprintf("[GCP/GKE] Initialize underlying machine of node %s", nodeToDelete.Name))
+ node, err := c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
+ framework.ExpectNoError(err)
+ framework.ExpectNoError(utilnode.InitNode(node))
+ }
+
+ ginkgo.By("Mark stores of failed tikv pods as tombstone")
+ pdClient, cancel, err := proxiedpdclient.NewProxiedPDClient(c, fw, ns, clusterName, false, nil)
+ framework.ExpectNoError(err)
+ defer func() {
+ if cancel != nil {
+ cancel()
+ }
+ }()
+ for _, pod := range tikvPodsOnDeletedNode {
+ framework.Logf("Mark tikv store of pod %s/%s as Tombstone", ns, pod.Name)
+ err = wait.PollImmediate(time.Second*3, time.Minute, func() (bool, error) {
+ storeID, err := utiltikv.GetStoreIDByPodName(cli, ns, clusterName, pod.Name)
+ if err != nil {
+ return false, nil
+ }
+ err = pdClient.SetStoreState(storeID, v1alpha1.TiKVStateTombstone)
+ if err != nil {
+ return false, nil
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err)
+ }
+ ginkgo.By("Delete pd members")
+ for _, pod := range pdPodsOnDeletedNode {
+ framework.Logf("Delete pd member of pod %s/%s", ns, pod.Name)
+ err = wait.PollImmediate(time.Second*3, time.Minute, func() (bool, error) {
+ err = pdClient.DeleteMember(pod.Name)
+ if err != nil {
+ return false, nil
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err)
+ }
+ cancel()
+ cancel = nil
+
+ if framework.TestContext.Provider == "aws" {
+ // Local storage is gone with the node and local PVs on deleted
+ // node will be unusable.
+ // If `setPVOwnerRef` is enabled in local-volume-provisioner,
+ // local PVs will be deleted when the node object is deleted
+ // and permanently gone in apiserver when associated PVCs are
+ // delete here.
+ ginkgo.By("[AWS/EKS] Delete associated PVCs if they are bound with local PVs")
+ localPVs := make([]string, 0)
+ for _, pvcName := range pvcNamesOnDeletedNode {
+ pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
+ if err != nil && !apierrors.IsNotFound(err) {
+ framework.Failf("apiserver error: %v", err)
+ }
+ if apierrors.IsNotFound(err) {
+ continue
+ }
+ if pvc.Spec.StorageClassName != nil && *pvc.Spec.StorageClassName == "local-storage" {
+ localPVs = append(localPVs, pvc.Spec.VolumeName)
+ err = c.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.Name, &metav1.DeleteOptions{})
+ framework.ExpectNoError(err)
+ }
+ }
+ } else if framework.TestContext.Provider == "gke" {
+ framework.Logf("We are using fixed paths in local PVs in our e2e. PVs of the deleted node are usable though the underlying storage is empty now")
+ // Because of pod exponential crash loop back off, we can
+ // delete the failed pods to make it start soon.
+ // Note that this is optional.
+ ginkgo.By("Deleting the failed pods")
+ for _, pod := range append(tikvPodsOnDeletedNode, pdPodsOnDeletedNode...) {
+ framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(pod.Name, &metav1.DeleteOptions{}))
+ }
+ }
+
+ ginkgo.By("Waiting for tidb cluster to be fully ready")
+ err = oa.WaitForTidbClusterReady(tc, 5*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
+ })
+
+ })
+
+})
diff --git a/tests/e2e/tidbcluster/tidbcluster.go b/tests/e2e/tidbcluster/tidbcluster.go
index cf10d24507..cc47d2ac39 100644
--- a/tests/e2e/tidbcluster/tidbcluster.go
+++ b/tests/e2e/tidbcluster/tidbcluster.go
@@ -15,12 +15,17 @@ package tidbcluster
import (
"context"
+ nerrors "errors"
"fmt"
_ "net/http/pprof"
"strconv"
"strings"
"time"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/pingcap/advanced-statefulset/pkg/apis/apps/v1/helper"
@@ -38,7 +43,9 @@ import (
"github.com/pingcap/tidb-operator/tests/apiserver"
e2econfig "github.com/pingcap/tidb-operator/tests/e2e/config"
utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image"
+ utilpod "github.com/pingcap/tidb-operator/tests/e2e/util/pod"
"github.com/pingcap/tidb-operator/tests/e2e/util/portforward"
+ utiltidb "github.com/pingcap/tidb-operator/tests/e2e/util/tidb"
"github.com/pingcap/tidb-operator/tests/pkg/apimachinery"
"github.com/pingcap/tidb-operator/tests/pkg/blockwriter"
"github.com/pingcap/tidb-operator/tests/pkg/fixture"
@@ -46,8 +53,10 @@ import (
v1 "k8s.io/api/core/v1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
@@ -142,13 +151,6 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
"tikv.resources.limits.storage": "1G",
},
},
- {
- Version: utilimage.TiDBTLSVersion,
- Name: "basic-v3-cluster-tls",
- Values: map[string]string{
- "enableTLSCluster": "true",
- },
- },
}
for _, clusterCfg := range clusterCfgs {
@@ -251,12 +253,10 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
upgradeVersions := cfg.GetUpgradeTidbVersionsOrDie()
ginkgo.By(fmt.Sprintf("Upgrading tidb cluster from %s to %s", cluster.ClusterVersion, upgradeVersions[0]))
ctx, cancel := context.WithCancel(context.Background())
- assignedNodes := oa.GetTidbMemberAssignedNodesOrDie(&cluster)
cluster.UpgradeAll(upgradeVersions[0])
oa.UpgradeTidbClusterOrDie(&cluster)
oa.CheckUpgradeOrDie(ctx, &cluster)
oa.CheckTidbClusterStatusOrDie(&cluster)
- oa.CheckTidbMemberAssignedNodesOrDie(&cluster, assignedNodes)
cancel()
ginkgo.By("Check webhook is still running")
@@ -293,6 +293,150 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
oa.StopInsertDataTo(&clusterA)
})
+ ginkgo.It("Adhoc backup and restore with BR CRD", func() {
+ if framework.TestContext.Provider != "aws" {
+ framework.Skipf("provider is not aws, skipping")
+ }
+ tcNameFrom := "backup"
+ tcNameTo := "restore"
+ serviceAccountName := "tidb-backup-manager"
+ backupFolder := time.Now().Format(time.RFC3339)
+
+ // create backup cluster
+ tcFrom := fixture.GetTidbCluster(ns, tcNameFrom, utilimage.TiDBBRVersion)
+ tcFrom.Spec.PD.Replicas = 1
+ tcFrom.Spec.TiKV.Replicas = 1
+ tcFrom.Spec.TiDB.Replicas = 1
+ err := genericCli.Create(context.TODO(), tcFrom)
+ framework.ExpectNoError(err)
+ err = oa.WaitForTidbClusterReady(tcFrom, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
+ clusterFrom := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameFrom, "", "")
+
+ // create restore cluster
+ tcTo := fixture.GetTidbCluster(ns, tcNameTo, utilimage.TiDBBRVersion)
+ tcTo.Spec.PD.Replicas = 1
+ tcTo.Spec.TiKV.Replicas = 1
+ tcTo.Spec.TiDB.Replicas = 1
+ err = genericCli.Create(context.TODO(), tcTo)
+ framework.ExpectNoError(err)
+ err = oa.WaitForTidbClusterReady(tcTo, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
+ clusterTo := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameTo, "", "")
+
+ // import some data into the cluster with blockwriter
+ ginkgo.By(fmt.Sprintf("Begin inserting data into cluster %q", clusterFrom.ClusterName))
+ oa.BeginInsertDataToOrDie(&clusterFrom)
+ err = wait.PollImmediate(time.Second*5, time.Minute*5, utiltidb.TiDBIsInserted(fw, tcFrom.GetNamespace(), tcFrom.GetName(), "root", "", "test", "block_writer"))
+ framework.ExpectNoError(err)
+ ginkgo.By(fmt.Sprintf("Stop inserting data into cluster %q", clusterFrom.ClusterName))
+ oa.StopInsertDataTo(&clusterFrom)
+
+ // prepare for create backup/restore CRD
+ backupRole := fixture.GetBackupRole(tcFrom, serviceAccountName)
+ _, err = c.RbacV1beta1().Roles(ns).Create(backupRole)
+ framework.ExpectNoError(err)
+ backupServiceAccount := fixture.GetBackupServiceAccount(tcFrom, serviceAccountName)
+ _, err = c.CoreV1().ServiceAccounts(ns).Create(backupServiceAccount)
+ framework.ExpectNoError(err)
+ backupRoleBinding := fixture.GetBackupRoleBing(tcFrom, serviceAccountName)
+ _, err = c.RbacV1beta1().RoleBindings(ns).Create(backupRoleBinding)
+ framework.ExpectNoError(err)
+ backupSecret := fixture.GetBackupSecret(tcFrom, "")
+ _, err = c.CoreV1().Secrets(ns).Create(backupSecret)
+ framework.ExpectNoError(err)
+ restoreSecret := fixture.GetBackupSecret(tcTo, "")
+ _, err = c.CoreV1().Secrets(ns).Create(restoreSecret)
+ framework.ExpectNoError(err)
+ cred := credentials.NewSharedCredentials("", "default")
+ val, err := cred.Get()
+ framework.ExpectNoError(err)
+ backupS3Secret := fixture.GetS3Secret(tcFrom, val.AccessKeyID, val.SecretAccessKey)
+ _, err = c.CoreV1().Secrets(ns).Create(backupS3Secret)
+ framework.ExpectNoError(err)
+
+ ginkgo.By(fmt.Sprintf("Begion to backup data cluster %q", clusterFrom.ClusterName))
+ // create backup CRD to process backup
+ backup := fixture.GetBackupCRDWithBR(tcFrom, backupFolder)
+ _, err = cli.PingcapV1alpha1().Backups(ns).Create(backup)
+ framework.ExpectNoError(err)
+
+ // check that the backup has succeeded
+ err = wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
+ tmpBackup, err := cli.PingcapV1alpha1().Backups(ns).Get(backup.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ // Check the conditions one by one; any status other than
+ // Complete or Failed means the backup is still running.
+ for _, condition := range tmpBackup.Status.Conditions {
+ if condition.Type == v1alpha1.BackupComplete {
+ return true, nil
+ } else if condition.Type == v1alpha1.BackupFailed {
+ return false, errors.NewInternalError(nerrors.New(condition.Reason))
+ }
+ }
+ return false, nil
+ })
+ framework.ExpectNoError(err)
+
+ ginkgo.By(fmt.Sprintf("Begion to Restore data cluster %q", clusterTo.ClusterName))
+ // create restore CRD to process restore
+ restore := fixture.GetRestoreCRDWithBR(tcTo, backupFolder)
+ _, err = cli.PingcapV1alpha1().Restores(ns).Create(restore)
+ framework.ExpectNoError(err)
+
+ // check that the restore has succeeded
+ err = wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
+ tmpRestore, err := cli.PingcapV1alpha1().Restores(ns).Get(restore.Name, metav1.GetOptions{})
+ if err != nil {
+ return false, err
+ }
+ // Check the conditions one by one; any status other than
+ // Complete or Failed means the restore is still running.
+ for _, condition := range tmpRestore.Status.Conditions {
+ if condition.Type == v1alpha1.RestoreComplete {
+ return true, nil
+ } else if condition.Type == v1alpha1.RestoreFailed {
+ return false, errors.NewInternalError(nerrors.New(condition.Reason))
+ }
+ }
+ return false, nil
+ })
+ framework.ExpectNoError(err)
+
+ ginkgo.By(fmt.Sprintf("Check the correctness of cluster %q and %q", clusterFrom.ClusterName, clusterTo.ClusterName))
+ isSame, err := oa.DataIsTheSameAs(&clusterFrom, &clusterTo)
+ framework.ExpectNoError(err)
+ if !isSame {
+ framework.ExpectNoError(nerrors.New("backup database and restore database is not the same"))
+ }
+
+ // delete backup data in S3
+ err = cli.PingcapV1alpha1().Backups(ns).Delete(backup.Name, &metav1.DeleteOptions{})
+ framework.ExpectNoError(err)
+
+ err = wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
+ awsConfig := aws.NewConfig().
+ WithRegion(backup.Spec.S3.Region).
+ WithCredentials(cred)
+ svc := s3.New(session.Must(session.NewSession(awsConfig)))
+ input := &s3.ListObjectsV2Input{
+ Bucket: aws.String(backup.Spec.S3.Bucket),
+ Prefix: aws.String(backup.Spec.S3.Prefix),
+ }
+ result, err := svc.ListObjectsV2(input)
+ if err != nil {
+ return false, err
+ }
+ if *result.KeyCount != 0 {
+ return false, nil
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err)
+ })
+
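Editorial aside (not part of this diff): the backup poll and the restore poll above share the same shape, so they could be factored into a small helper. A sketch, assuming the imports already present in tidbcluster.go; the helper name is hypothetical.

```go
// waitForBackupComplete polls a Backup CR until it reports Complete, and
// fails fast if it reports Failed. A sibling helper for Restore would look
// the same with Restores/RestoreComplete/RestoreFailed.
func waitForBackupComplete(cli versioned.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		b, err := cli.PingcapV1alpha1().Backups(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, condition := range b.Status.Conditions {
			if condition.Type == v1alpha1.BackupComplete {
				return true, nil
			} else if condition.Type == v1alpha1.BackupFailed {
				return false, errors.NewInternalError(nerrors.New(condition.Reason))
			}
		}
		return false, nil
	})
}
```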
ginkgo.It("Test aggregated apiserver", func() {
ginkgo.By(fmt.Sprintf("Starting to test apiserver, test apiserver image: %s", cfg.E2EImage))
framework.Logf("config: %v", config)
@@ -762,18 +906,80 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
pvName := pvc.Spec.VolumeName
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
framework.ExpectNoError(err, "Expected fetch tidbmonitor pv success")
- value, existed := pv.Labels[label.ComponentLabelKey]
- framework.ExpectEqual(existed, true)
- framework.ExpectEqual(value, label.TiDBMonitorVal)
- value, existed = pv.Labels[label.InstanceLabelKey]
- framework.ExpectEqual(existed, true)
- framework.ExpectEqual(value, "e2e-monitor")
- value, existed = pv.Labels[label.InstanceLabelKey]
- framework.ExpectEqual(existed, true)
- framework.ExpectEqual(value, "e2e-monitor")
- value, existed = pv.Labels[label.ManagedByLabelKey]
- framework.ExpectEqual(existed, true)
- framework.ExpectEqual(value, label.TiDBOperator)
+
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ // re-fetch the PV on each retry so labels added asynchronously are observed
+ pv, err = c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ value, existed := pv.Labels[label.ComponentLabelKey]
+ if !existed || value != label.TiDBMonitorVal {
+ return false, nil
+ }
+ value, existed = pv.Labels[label.InstanceLabelKey]
+ if !existed || value != "e2e-monitor" {
+ return false, nil
+ }
+
+ value, existed = pv.Labels[label.NameLabelKey]
+ if !existed || value != "tidb-cluster" {
+ return false, nil
+ }
+ value, existed = pv.Labels[label.ManagedByLabelKey]
+ if !existed || value != label.TiDBOperator {
+ return false, nil
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err, "monitor pv label error")
+
+ // update TidbMonitor and check whether portName is updated and the nodePort is unchanged
+ tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})
+ framework.ExpectNoError(err, "fetch latest tidbmonitor error")
+ tm.Spec.Prometheus.Service.Type = corev1.ServiceTypeNodePort
+ tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Update(tm)
+ framework.ExpectNoError(err, "update tidbmonitor service type error")
+
+ var targetPort int32
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ prometheusSvc, err := c.CoreV1().Services(ns).Get(fmt.Sprintf("%s-prometheus", tm.Name), metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if len(prometheusSvc.Spec.Ports) != 1 {
+ return false, nil
+ }
+ if prometheusSvc.Spec.Type != corev1.ServiceTypeNodePort {
+ return false, nil
+ }
+ targetPort = prometheusSvc.Spec.Ports[0].NodePort
+ return true, nil
+ })
+ framework.ExpectNoError(err, "first update tidbmonitor service error")
+
+ tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})
+ framework.ExpectNoError(err, "fetch latest tidbmonitor again error")
+ newPortName := "any-other-word"
+ tm.Spec.Prometheus.Service.PortName = &newPortName
+ tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Update(tm)
+ framework.ExpectNoError(err, "update tidbmonitor service portName error")
+
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ prometheusSvc, err := c.CoreV1().Services(ns).Get(fmt.Sprintf("%s-prometheus", tm.Name), metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if len(prometheusSvc.Spec.Ports) != 1 {
+ return false, nil
+ }
+ if prometheusSvc.Spec.Type != corev1.ServiceTypeNodePort {
+ return false, nil
+ }
+ if prometheusSvc.Spec.Ports[0].Name != "any-other-word" {
+ return false, nil
+ }
+ if prometheusSvc.Spec.Ports[0].NodePort != targetPort {
+ return false, nil
+ }
+ return true, nil
+ })
+ framework.ExpectNoError(err, "second update tidbmonitor service error")
})
ginkgo.It("[Feature: AdvancedStatefulSet] Upgrading tidb cluster while pods are not consecutive", func() {
@@ -820,6 +1026,67 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
framework.ExpectNoError(err)
})
+ ginkgo.It("TiDB cluster can be paused and unpaused", func() {
+ tcName := "paused"
+ tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBV3Version)
+ tc.Spec.PD.Replicas = 1
+ tc.Spec.TiKV.Replicas = 1
+ tc.Spec.TiDB.Replicas = 1
+ err := genericCli.Create(context.TODO(), tc)
+ framework.ExpectNoError(err)
+ err = oa.WaitForTidbClusterReady(tc, 30*time.Minute, 15*time.Second)
+ framework.ExpectNoError(err)
+
+ podListBeforePaused, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
+ framework.ExpectNoError(err)
+
+ ginkgo.By("Pause the tidb cluster")
+ err = controller.GuaranteedUpdate(genericCli, tc, func() error {
+ tc.Spec.Paused = true
+ return nil
+ })
+ framework.ExpectNoError(err)
+ ginkgo.By("Make a change")
+ err = controller.GuaranteedUpdate(genericCli, tc, func() error {
+ tc.Spec.Version = utilimage.TiDBV3UpgradeVersion
+ return nil
+ })
+ framework.ExpectNoError(err)
+
+ ginkgo.By("Check pods are not changed when the tidb cluster is paused")
+ err = utilpod.WaitForPodsAreChanged(c, podListBeforePaused.Items, time.Minute*5)
+ framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pods are changed when the tidb cluster is paused")
+
+ ginkgo.By("Unpause the tidb cluster")
+ err = controller.GuaranteedUpdate(genericCli, tc, func() error {
+ tc.Spec.Paused = false
+ return nil
+ })
+ framework.ExpectNoError(err)
+
+ ginkgo.By("Check the tidb cluster will be upgraded now")
+ listOptions := metav1.ListOptions{
+ LabelSelector: labels.SelectorFromSet(label.New().Instance(tcName).Component(label.TiKVLabelVal).Labels()).String(),
+ }
+ err = wait.PollImmediate(5*time.Second, 15*time.Minute, func() (bool, error) {
+ podList, err := c.CoreV1().Pods(ns).List(listOptions)
+ if err != nil && !apierrors.IsNotFound(err) {
+ return false, err
+ }
+ for _, pod := range podList.Items {
+ for _, c := range pod.Spec.Containers {
+ if c.Name == v1alpha1.TiKVMemberType.String() {
+ if c.Image == tc.TiKVImage() {
+ return true, nil
+ }
+ }
+ }
+ }
+ return false, nil
+ })
+ framework.ExpectNoError(err)
+ })
+
ginkgo.It("tidb-scale: clear TiDB failureMembers when scale TiDB to zero", func() {
cluster := newTidbClusterConfig(e2econfig.TestConfig, ns, "tidb-scale", "admin", "")
cluster.Resources["pd.replicas"] = "3"
@@ -897,16 +1164,16 @@ func newTidbClusterConfig(cfg *tests.Config, ns, clusterName, password, tidbVers
Resources: map[string]string{
"pd.resources.limits.cpu": "1000m",
"pd.resources.limits.memory": "2Gi",
- "pd.resources.requests.cpu": "200m",
- "pd.resources.requests.memory": "200Mi",
+ "pd.resources.requests.cpu": "20m",
+ "pd.resources.requests.memory": "20Mi",
"tikv.resources.limits.cpu": "2000m",
"tikv.resources.limits.memory": "4Gi",
- "tikv.resources.requests.cpu": "200m",
- "tikv.resources.requests.memory": "200Mi",
+ "tikv.resources.requests.cpu": "20m",
+ "tikv.resources.requests.memory": "20Mi",
"tidb.resources.limits.cpu": "2000m",
"tidb.resources.limits.memory": "4Gi",
- "tidb.resources.requests.cpu": "200m",
- "tidb.resources.requests.memory": "200Mi",
+ "tidb.resources.requests.cpu": "20m",
+ "tidb.resources.requests.memory": "20Mi",
"tidb.initSql": strconv.Quote("create database e2e;"),
"discovery.image": cfg.OperatorImage,
},
diff --git a/tests/e2e/util/image/image.go b/tests/e2e/util/image/image.go
index 4444496079..a6ab2c4fb5 100644
--- a/tests/e2e/util/image/image.go
+++ b/tests/e2e/util/image/image.go
@@ -32,6 +32,7 @@ const (
TiDBV3UpgradeVersion = "v3.0.9"
TiDBTLSVersion = TiDBV3Version // must >= 3.0.5
TiDBV2Version = "v2.1.19"
+ TiDBBRVersion = "v4.0.0-beta.1"
)
func ListImages() []string {
diff --git a/tests/e2e/util/node/node.go b/tests/e2e/util/node/node.go
new file mode 100644
index 0000000000..b8a873706f
--- /dev/null
+++ b/tests/e2e/util/node/node.go
@@ -0,0 +1,82 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package node
+
+import (
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/kubernetes/test/e2e/framework"
+ "k8s.io/kubernetes/test/e2e/framework/ssh"
+)
+
+var (
+ awsNodeInitCmd = `
+sudo bash -c '
+test -d /mnt/disks || mkdir -p /mnt/disks
+df -h /mnt/disks
+if mountpoint /mnt/disks &>/dev/null; then
+ echo "info: /mnt/disks is a mountpoint"
+else
+ echo "info: /mnt/disks is not a mountpoint, creating local volumes on the rootfs"
+fi
+cd /mnt/disks
+for ((i = 1; i <= 32; i++)) {
+ if [ ! -d vol$i ]; then
+ mkdir vol$i
+ fi
+ if ! mountpoint vol$i &>/dev/null; then
+ mount --bind vol$i vol$i
+ fi
+}
+echo "info: increase max open files for containers"
+if ! grep -qF "OPTIONS" /etc/sysconfig/docker; then
+ echo 'OPTIONS="--default-ulimit nofile=1024000:1024000"' >> /etc/sysconfig/docker
+fi
+systemctl restart docker
+'
+`
+ // disks are created under /mnt/stateful_partition directory
+ // https://cloud.google.com/container-optimized-os/docs/concepts/disks-and-filesystem
+ gkeNodeInitCmd = `
+sudo bash -c '
+test -d /mnt/stateful_partition/disks || mkdir -p /mnt/stateful_partition/disks
+df -h /mnt/stateful_partition/disks
+test -d /mnt/disks || mkdir -p /mnt/disks
+cd /mnt/disks
+for ((i = 1; i <= 32; i++)) {
+ if [ ! -d vol$i ]; then
+ mkdir vol$i
+ fi
+ if ! mountpoint vol$i &>/dev/null; then
+ if [ ! -d /mnt/stateful_partition/disks/vol$i ]; then
+ mkdir /mnt/stateful_partition/disks/vol$i
+ fi
+ mount --bind /mnt/stateful_partition/disks/vol$i vol$i
+ fi
+}
+'
+`
+)
+
+func InitNode(node *v1.Node) error {
+ var initNodeCmd string
+ if framework.TestContext.Provider == "aws" {
+ initNodeCmd = awsNodeInitCmd
+ } else if framework.TestContext.Provider == "gke" {
+ initNodeCmd = gkeNodeInitCmd
+ } else {
+ framework.Logf("Unknown provider %q, skipped", framework.TestContext.Provider)
+ return nil
+ }
+ return ssh.IssueSSHCommand(initNodeCmd, framework.TestContext.Provider, node)
+}
diff --git a/tests/e2e/util/operator/operator.go b/tests/e2e/util/operator/operator.go
new file mode 100644
index 0000000000..58d0b87188
--- /dev/null
+++ b/tests/e2e/util/operator/operator.go
@@ -0,0 +1,70 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package operator
+
+import (
+ "time"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// OperatorKillerConfig describes configuration for operator killer.
+type OperatorKillerConfig struct {
+ Enabled bool
+ // Interval is time between operator failures.
+ Interval time.Duration
+ // Operator pods will be deleted between [Interval, Interval * (1.0 + JitterFactor)].
+ JitterFactor float64
+}
+
+// OperatorKiller deletes pods of tidb-operator to simulate operator failures.
+type OperatorKiller struct {
+ config OperatorKillerConfig
+ client kubernetes.Interface
+ podLister func() ([]v1.Pod, error)
+}
+
+// NewOperatorKiller creates a new operator killer.
+func NewOperatorKiller(config OperatorKillerConfig, client kubernetes.Interface, podLister func() ([]v1.Pod, error)) *OperatorKiller {
+ return &OperatorKiller{
+ config: config,
+ client: client,
+ podLister: podLister,
+ }
+}
+
+// Run starts OperatorKiller until stopCh is closed.
+func (k *OperatorKiller) Run(stopCh <-chan struct{}) {
+ // wait.JitterUntil starts work immediately, so wait first.
+ time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
+ wait.JitterUntil(func() {
+ pods, err := k.podLister()
+ if err != nil {
+ framework.Logf("failed to list operator pods: %v", err)
+ return
+ }
+ for _, pod := range pods {
+ err = k.client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
+ if err != nil {
+ framework.Logf("failed to delete pod %s/%s: %v", pod.Namespace, pod.Name, err)
+ } else {
+ framework.Logf("successfully deleted tidb-operator pod %s/%s", pod.Namespace, pod.Name)
+ }
+ }
+ }, k.config.Interval, k.config.JitterFactor, true, stopCh)
+}
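A minimal usage sketch for OperatorKiller, assuming a test already holds a kubernetes.Interface client `c`; the namespace and label selector values are illustrative assumptions, not taken from this PR.

```go
// Configure a killer that deletes operator pods roughly every 5-7.5 minutes.
cfg := operator.OperatorKillerConfig{
	Enabled:      true,
	Interval:     5 * time.Minute,
	JitterFactor: 0.5,
}
podLister := func() ([]v1.Pod, error) {
	// Assumed namespace/labels; adjust to wherever tidb-operator is deployed.
	podList, err := c.CoreV1().Pods("tidb-admin").List(metav1.ListOptions{
		LabelSelector: "app.kubernetes.io/name=tidb-operator",
	})
	if err != nil {
		return nil, err
	}
	return podList.Items, nil
}
killer := operator.NewOperatorKiller(cfg, c, podLister)

stopCh := make(chan struct{})
go killer.Run(stopCh)
// ... run the disruptive test ...
close(stopCh)
```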
diff --git a/tests/e2e/util/pod/pod.go b/tests/e2e/util/pod/pod.go
index b0a95ab8b2..f84abd0ce7 100644
--- a/tests/e2e/util/pod/pod.go
+++ b/tests/e2e/util/pod/pod.go
@@ -26,10 +26,9 @@ import (
testutils "k8s.io/kubernetes/test/utils"
)
-// WaitForPodsAreNotAffected waits for given pods are not affected.
-// It returns wait.ErrWaitTimeout if the given pods are not affected in specified timeout.
-func WaitForPodsAreNotAffected(c kubernetes.Interface, pods []v1.Pod, timeout time.Duration) error {
- return wait.PollImmediate(time.Second*5, timeout, func() (bool, error) {
+// PodsAreChanged returns a condition that checks whether any of the given pods has been changed (recreated or updated).
+func PodsAreChanged(c kubernetes.Interface, pods []v1.Pod) wait.ConditionFunc {
+ return func() (bool, error) {
for _, pod := range pods {
podNew, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
@@ -46,5 +45,11 @@ func WaitForPodsAreNotAffected(c kubernetes.Interface, pods []v1.Pod, timeout ti
}
}
return false, nil
- })
+ }
+}
+
+// WaitForPodsAreChanged waits until the given pods are changed.
+// It returns wait.ErrWaitTimeout if the given pods are not changed within the specified timeout.
+func WaitForPodsAreChanged(c kubernetes.Interface, pods []v1.Pod, timeout time.Duration) error {
+ return wait.PollImmediate(time.Second*5, timeout, PodsAreChanged(c, pods))
}
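Worth noting for readers of the tests above: because the helper now waits for pods to change, wait.ErrWaitTimeout is the desired outcome when asserting that pods were left untouched. A usage sketch, using only identifiers that already appear elsewhere in this PR:

```go
// Capture the pods of interest before the operation under test.
before, err := c.CoreV1().Pods(ns).List(listOptions)
framework.ExpectNoError(err)

// ... perform the operation that must not restart or update the pods ...

// Timing out means no pod changed within the window, which is the pass case.
err = utilpod.WaitForPodsAreChanged(c, before.Items, 5*time.Minute)
framework.ExpectEqual(err, wait.ErrWaitTimeout, "pods were unexpectedly changed")
```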
diff --git a/tests/e2e/util/statefulset/statefulset.go b/tests/e2e/util/statefulset/statefulset.go
index 4ee4087a3f..e9c9fa00ce 100644
--- a/tests/e2e/util/statefulset/statefulset.go
+++ b/tests/e2e/util/statefulset/statefulset.go
@@ -19,11 +19,12 @@ import (
"github.com/pingcap/advanced-statefulset/pkg/apis/apps/v1/helper"
appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
- e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
)
var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$")
@@ -43,7 +44,11 @@ func GetStatefulPodOrdinal(podName string) int {
// IsAllDesiredPodsRunningAndReady checks if all desired pods of given statefulset are running and ready
func IsAllDesiredPodsRunningAndReady(c kubernetes.Interface, sts *appsv1.StatefulSet) bool {
deleteSlots := helper.GetDeleteSlots(sts)
- actualPodList := e2esset.GetPodList(c, sts)
+ actualPodList, err := getPodList(c, sts)
+ if err != nil {
+ klog.Infof("get podlist error in IsAllDesiredPodsRunningAndReady, err:%v", err)
+ return false
+ }
actualPodOrdinals := sets.NewInt32()
for _, pod := range actualPodList.Items {
actualPodOrdinals.Insert(int32(GetStatefulPodOrdinal(pod.Name)))
@@ -62,3 +67,14 @@ func IsAllDesiredPodsRunningAndReady(c kubernetes.Interface, sts *appsv1.Statefu
klog.Infof("desired pods of sts %s/%s are running and ready (%v)", sts.Namespace, sts.Name, actualPodOrdinals.List())
return true
}
+
+// getPodList gets the current Pods in ss.
+// Unlike e2esset.GetPodList(c, sts), which panics on error, it returns the error so callers can handle it.
+func getPodList(c kubernetes.Interface, ss *appsv1.StatefulSet) (*corev1.PodList, error) {
+ selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
+ if err != nil {
+ return nil, err
+ }
+ podList, err := c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
+ return podList, err
+}
diff --git a/tests/e2e/util/tidb/tidb.go b/tests/e2e/util/tidb/tidb.go
new file mode 100644
index 0000000000..7d301cf3c8
--- /dev/null
+++ b/tests/e2e/util/tidb/tidb.go
@@ -0,0 +1,104 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tidb
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ // To register MySQL driver
+ _ "github.com/go-sql-driver/mysql"
+ "github.com/pingcap/tidb-operator/pkg/controller"
+ "github.com/pingcap/tidb-operator/tests/e2e/util/portforward"
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
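+// dummyCancel is a no-op cancel function returned when port-forwarding fails, so callers can always call the returned CancelFunc.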
+var dummyCancel = func() {}
+
+// GetTiDBDSN returns a DSN for connecting to the TiDB service of the given cluster through a forwarded local port, together with a cancel function that releases the port-forward.
+func GetTiDBDSN(fw portforward.PortForward, ns, tc, user, password, database string) (string, context.CancelFunc, error) {
+ localHost, localPort, cancel, err := portforward.ForwardOnePort(fw, ns, fmt.Sprintf("svc/%s", controller.TiDBMemberName(tc)), 4000)
+ if err != nil {
+ return "", dummyCancel, err
+ }
+ return fmt.Sprintf("%s:%s@(%s:%d)/%s?charset=utf8", user, password, localHost, localPort, database), cancel, nil
+}
+
+// TiDBIsConnectable checks whether the tidb cluster is connectable.
+func TiDBIsConnectable(fw portforward.PortForward, ns, tc, user, password string) wait.ConditionFunc {
+ return func() (bool, error) {
+ var db *sql.DB
+ dsn, cancel, err := GetTiDBDSN(fw, ns, tc, "root", password, "test")
+ if err != nil {
+ return false, err
+ }
+ defer cancel()
+ if db, err = sql.Open("mysql", dsn); err != nil {
+ return false, err
+ }
+ defer db.Close()
+ if err := db.Ping(); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+}
+
+// TiDBIsInserted checks whether some data has been inserted into the given table of the tidb cluster.
+func TiDBIsInserted(fw portforward.PortForward, ns, tc, user, password, dbName, tableName string) wait.ConditionFunc {
+ return func() (bool, error) {
+ var db *sql.DB
+ dsn, cancel, err := GetTiDBDSN(fw, ns, tc, user, password, dbName)
+ if err != nil {
+ return false, err
+ }
+
+ defer cancel()
+ if db, err = sql.Open("mysql", dsn); err != nil {
+ return false, err
+ }
+
+ defer db.Close()
+ if err := db.Ping(); err != nil {
+ return false, err
+ }
+
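+ // getCntFn returns the current number of rows in tableName.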
+ getCntFn := func(db *sql.DB, tableName string) (int, error) {
+ var cnt int
+ rows, err := db.Query(fmt.Sprintf("SELECT count(*) FROM %s", tableName))
+ if err != nil {
+ return cnt, fmt.Errorf("failed to select count(*) from %s, %v", tableName, err)
+ }
+ defer rows.Close()
+ for rows.Next() {
+ err := rows.Scan(&cnt)
+ if err != nil {
+ return cnt, fmt.Errorf("failed to scan count from %s, %v", tableName, err)
+ }
+ return cnt, nil
+ }
+ return cnt, fmt.Errorf("can not find count of table %s", tableName)
+ }
+
+ cnt, err := getCntFn(db, tableName)
+ if err != nil {
+ return false, err
+ }
+ if cnt == 0 {
+ return false, nil
+ }
+
+ return true, nil
+ }
+}
diff --git a/tests/e2e/util/tikv/tikv.go b/tests/e2e/util/tikv/tikv.go
new file mode 100644
index 0000000000..1ecbf0d6db
--- /dev/null
+++ b/tests/e2e/util/tikv/tikv.go
@@ -0,0 +1,35 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tikv
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
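+// GetStoreIDByPodName looks up the TiKV store whose pod name matches podName in the TidbCluster status and returns its store ID.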
+func GetStoreIDByPodName(c versioned.Interface, ns, clusterName, podName string) (uint64, error) {
+ tc, err := c.PingcapV1alpha1().TidbClusters(ns).Get(clusterName, metav1.GetOptions{})
+ if err != nil {
+ return 0, err
+ }
+ for _, store := range tc.Status.TiKV.Stores {
+ if store.PodName == podName {
+ return strconv.ParseUint(store.ID, 10, 64)
+ }
+ }
+ return 0, fmt.Errorf("tikv store of pod %q not found in cluster %s/%s", podName, ns, clusterName)
+}
diff --git a/tests/examples/001-basic.sh b/tests/examples/001-basic.sh
new file mode 100755
index 0000000000..8f4aec77a8
--- /dev/null
+++ b/tests/examples/001-basic.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/../.. && pwd)
+cd $ROOT
+
+source "${ROOT}/hack/lib.sh"
+source "${ROOT}/tests/examples/t.sh"
+
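+# Use the script file name (without extension) as the test namespace.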
+NS=$(basename ${0%.*})
+
+function cleanup() {
+ kubectl -n $NS delete -f examples/basic/tidb-cluster.yaml
+ kubectl delete ns $NS
+}
+
+trap cleanup EXIT
+
+kubectl create ns $NS
+hack::wait_for_success 10 3 "t::ns_is_active $NS"
+
+kubectl -n $NS apply -f examples/basic/tidb-cluster.yaml
+
+hack::wait_for_success 600 3 "t::tc_is_ready $NS basic"
diff --git a/tests/examples/002-selfsigned-tls.sh b/tests/examples/002-selfsigned-tls.sh
new file mode 100755
index 0000000000..f735269067
--- /dev/null
+++ b/tests/examples/002-selfsigned-tls.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/../.. && pwd)
+cd $ROOT
+
+source "${ROOT}/hack/lib.sh"
+source "${ROOT}/tests/examples/t.sh"
+
+NS=$(basename ${0%.*})
+CERT_MANAGER_VERSION=0.14.1
+
+PORT_FORWARD_PID=
+
+function cleanup() {
+ if [ -n "$PORT_FORWARD_PID" ]; then
+ echo "info: kill port-forward background process (PID: $PORT_FORWARD_PID)"
+ kill $PORT_FORWARD_PID
+ fi
+ kubectl delete -f examples/selfsigned-tls/ --ignore-not-found
+ kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v${CERT_MANAGER_VERSION}/cert-manager.yaml --ignore-not-found
+ kubectl delete ns $NS
+}
+
+trap cleanup EXIT
+
+kubectl create ns $NS
+hack::wait_for_success 10 3 "t::ns_is_active $NS"
+
+kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v${CERT_MANAGER_VERSION}/cert-manager.yaml
+hack::wait_for_success 10 3 "t::crds_are_ready certificaterequests.cert-manager.io certificates.cert-manager.io challenges.acme.cert-manager.io clusterissuers.cert-manager.io issuers.cert-manager.io orders.acme.cert-manager.io"
+for d in cert-manager cert-manager-cainjector cert-manager-webhook; do
+ hack::wait_for_success 300 3 "t::deploy_is_ready cert-manager $d"
+ if [ $? -ne 0 ]; then
+ echo "fatal: timed out waiting for the deployment $d to be ready"
+ exit 1
+ fi
+done
+
+kubectl -n $NS apply -f examples/selfsigned-tls/
+
+hack::wait_for_success 300 3 "t::tc_is_ready $NS tls"
+if [ $? -ne 0 ]; then
+ echo "fatal: failed to wait for the cluster to be ready"
+ exit 1
+fi
+
+echo "info: verify mysql client can connect with tidb server with SSL enabled"
+kubectl -n $NS port-forward svc/tls-tidb 4000:4000 &> /tmp/port-forward.log &
+PORT_FORWARD_PID=$!
+
+host=127.0.0.1
+port=4000
+for ((i=0; i < 10; i++)); do
+ nc -zv -w 3 $host $port
+ if [ $? -eq 0 ]; then
+ break
+ else
+ echo "info: failed to connect to $host:$port, sleep 1 second then retry"
+ sleep 1
+ fi
+done
+
+hack::wait_for_success 100 3 "mysql -h 127.0.0.1 -P 4000 -uroot -e 'select tidb_version();'"
+if [ $? -ne 0 ]; then
+ echo "fatal: failed to connect to TiDB"
+ exit 1
+fi
+
+has_ssl=$(mysql -h 127.0.0.1 -P 4000 -uroot --ssl -e "SHOW VARIABLES LIKE '%ssl%';" | awk '/have_ssl/ {print $2}')
+if [[ "$has_ssl" != "YES" ]]; then
+ echo "fatal: ssl is not enabled successfully, has_ssl is '$has_ssl'"
+ exit 1
+fi
+echo "info: ssl is enabled successfully, has_ssl is '$has_ssl'"
diff --git a/tests/examples/t.sh b/tests/examples/t.sh
new file mode 100644
index 0000000000..b25462f35b
--- /dev/null
+++ b/tests/examples/t.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/../.. && pwd)
+cd $ROOT
+
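+# t::tc_is_ready returns 0 only when the ready replicas of the pd/tikv/tidb statefulsets match the desired replicas of the TidbCluster.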
+function t::tc_is_ready() {
+ local ns="$1"
+ local name="$2"
+ local pdDesiredReplicas=$(kubectl -n $ns get tc $name -ojsonpath='{.spec.pd.replicas}')
+ local tikvDesiredReplicas=$(kubectl -n $ns get tc $name -ojsonpath='{.spec.tikv.replicas}')
+ local tidbDesiredReplicas=$(kubectl -n $ns get tc $name -ojsonpath='{.spec.tidb.replicas}')
+ local pdReplicas=$(kubectl -n $ns get tc $name -ojsonpath='{.status.pd.statefulSet.readyReplicas}')
+ if [[ "$pdReplicas" != "$pdDesiredReplicas" ]]; then
+ echo "info: [tc/$name] got pd replicas $pdReplicas, expects $pdDesiredReplicas"
+ return 1
+ fi
+ local tikvReplicas=$(kubectl -n $ns get tc $name -ojsonpath='{.status.tikv.statefulSet.readyReplicas}')
+ if [[ "$tikvReplicas" != "$tikvDesiredReplicas" ]]; then
+ echo "info: [tc/$name] got tikv replicas $tikvReplicas, expects $tikvDesiredReplicas"
+ return 1
+ fi
+ local tidbReplicas=$(kubectl -n $ns get tc $name -ojsonpath='{.status.tidb.statefulSet.readyReplicas}')
+ if [[ "$tidbReplicas" != "$tidbDesiredReplicas" ]]; then
+ echo "info: [tc/$name] got tidb replicas $tidbReplicas, expects $tidbDesiredReplicas"
+ return 1
+ fi
+ echo "info: [tc/$name] pd replicas $pdReplicas, tikv replicas $tikvReplicas, tidb replicas $tidbReplicas"
+ return 0
+}
+
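+# t::crds_are_ready returns 0 when every given CRD exists and its Established condition is True.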
+function t::crds_are_ready() {
+ for name in "$@"; do
+ local established
+ established=$(kubectl get crd $name -o json | jq -r '.status["conditions"][] | select(.type == "Established") | .status')
+ if [ $? -ne 0 ]; then
+ echo "error: crd $name is not found"
+ return 1
+ fi
+ if [[ "$established" != "True" ]]; then
+ echo "error: crd $name is not ready"
+ return 1
+ fi
+ done
+ return 0
+}
+
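+# t::ns_is_active returns 0 when the given namespace is in the Active phase.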
+function t::ns_is_active() {
+ local ns="$1"
+ local phase=$(kubectl get ns $ns -ojsonpath='{.status.phase}')
+ [[ "$phase" == "Active" ]]
+}
+
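+# t::deploy_is_ready returns 0 when all desired pods of the given deployment are ready.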
+function t::deploy_is_ready() {
+ local ns="$1"
+ local name="$2"
+ local desired ready
+ read -r desired ready <<<$(kubectl -n $ns get deploy/$name -ojsonpath='{.spec.replicas} {.status.readyReplicas}{"\n"}')
+ if [[ "$desired" -gt 0 && "$desired" -eq "$ready" ]]; then
+ echo "info: all pods of deployment $ns/$name are ready (desired: $desired, ready: $ready)"
+ return 0
+ fi
+ echo "info: not all pods of deployment $ns/$name are ready (desired: $desired, ready: $ready)"
+ return 1
+}
+
diff --git a/tests/failover.go b/tests/failover.go
index c61981d33c..6633f6b53c 100644
--- a/tests/failover.go
+++ b/tests/failover.go
@@ -35,7 +35,8 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
- glog "k8s.io/klog"
+ "k8s.io/klog"
+ podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig, pdFailoverPeriod time.Duration) error {
@@ -50,7 +51,7 @@ func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig
deletePDDataCmd := fmt.Sprintf("kubectl exec -n %s %s -- rm -rf /var/lib/pd/member", ns, podName)
result, err = exec.Command("/bin/sh", "-c", deletePDDataCmd).CombinedOutput()
if err != nil {
- glog.Error(err)
+ klog.Error(err)
return false, nil
}
return true, nil
@@ -58,17 +59,17 @@ func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig
if err != nil {
return fmt.Errorf("failed to delete pod %s/%s data, %s", ns, podName, string(result))
}
- glog.Infof("delete pod %s/%s data successfully", ns, podName)
+ klog.Infof("delete pod %s/%s data successfully", ns, podName)
err = wait.Poll(10*time.Second, failoverTimeout+pdFailoverPeriod, func() (bool, error) {
tc, err := oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{})
if err != nil {
- glog.Error(err)
+ klog.Error(err)
return false, nil
}
if len(tc.Status.PD.FailureMembers) == 1 {
- glog.Infof("%#v", tc.Status.PD.FailureMembers)
+ klog.Infof("%#v", tc.Status.PD.FailureMembers)
return true, nil
}
return false, nil
@@ -76,26 +77,36 @@ func (oa *operatorActions) DeletePDDataThenCheckFailover(info *TidbClusterConfig
if err != nil {
return fmt.Errorf("failed to check pd %s/%s failover", ns, podName)
}
- glog.Infof("check pd %s/%s failover successfully", ns, podName)
+ klog.Infof("check pd %s/%s failover successfully", ns, podName)
- tc, err := oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{})
- if err != nil {
- return err
- }
- tc.Status.PD.FailureMembers = nil
- tc, err = oa.cli.PingcapV1alpha1().TidbClusters(ns).Update(tc)
+ err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
+ tc, err := oa.cli.PingcapV1alpha1().TidbClusters(ns).Get(tcName, metav1.GetOptions{})
+ if err != nil {
+ klog.Error(err.Error())
+ return false, nil
+ }
+ if tc.Status.PD.FailureMembers == nil || len(tc.Status.PD.FailureMembers) < 1 {
+ return true, nil
+ }
+ tc.Status.PD.FailureMembers = nil
+ tc, err = oa.cli.PingcapV1alpha1().TidbClusters(ns).Update(tc)
+ if err != nil {
+ klog.Error(err.Error())
+ }
+ return false, nil
+ })
if err != nil {
return err
}
+
err = oa.CheckTidbClusterStatus(info)
if err != nil {
return err
}
- glog.Infof("recover %s/%s successfully", ns, podName)
+ klog.Infof("recover %s/%s successfully", ns, podName)
return nil
}
-
func (oa *operatorActions) DeletePDDataThenCheckFailoverOrDie(info *TidbClusterConfig, pdFailoverPeriod time.Duration) {
if err := oa.DeletePDDataThenCheckFailover(info, pdFailoverPeriod); err != nil {
slack.NotifyAndPanic(err)
@@ -111,21 +122,21 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon
// checkout latest tidb cluster
tc, err := cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get the cluster: ns=%s tc=%s err=%s", info.Namespace, info.ClusterName, err.Error())
+ klog.Errorf("failed to get the cluster: ns=%s tc=%s err=%s", info.Namespace, info.ClusterName, err.Error())
return err
}
// checkout pd config
pdCfg, err := oa.pdControl.GetPDClient(pdapi.Namespace(tc.GetNamespace()), tc.GetName(), tc.IsTLSClusterEnabled()).GetConfig()
if err != nil {
- glog.Errorf("failed to get the pd config: tc=%s err=%s", info.ClusterName, err.Error())
+ klog.Errorf("failed to get the pd config: tc=%s err=%s", info.ClusterName, err.Error())
return err
}
maxStoreDownTime, err := time.ParseDuration(pdCfg.Schedule.MaxStoreDownTime)
if err != nil {
return err
}
- glog.Infof("truncate sst file failover config: maxStoreDownTime=%v tikvFailoverPeriod=%v", maxStoreDownTime, tikvFailoverPeriod)
+ klog.Infof("truncate sst file failover config: maxStoreDownTime=%v tikvFailoverPeriod=%v", maxStoreDownTime, tikvFailoverPeriod)
// find an up store
var store v1alpha1.TiKVStore
@@ -139,16 +150,16 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon
break
}
if len(store.ID) == 0 {
- glog.Errorf("failed to find an up store")
+ klog.Errorf("failed to find an up store")
return errors.New("no up store for truncating sst file")
}
- glog.Infof("truncate sst file target store: id=%s pod=%s", store.ID, store.PodName)
+ klog.Infof("truncate sst file target store: id=%s pod=%s", store.ID, store.PodName)
oa.EmitEvent(info, fmt.Sprintf("TruncateSSTFile: tikv: %s", store.PodName))
- glog.Infof("deleting pod: [%s/%s] and wait 1 minute for the pod to terminate", info.Namespace, store.PodName)
+ klog.Infof("deleting pod: [%s/%s] and wait 1 minute for the pod to terminate", info.Namespace, store.PodName)
err = cli.CoreV1().Pods(info.Namespace).Delete(store.PodName, nil)
if err != nil {
- glog.Errorf("failed to get delete the pod: ns=%s tc=%s pod=%s err=%s",
+ klog.Errorf("failed to get delete the pod: ns=%s tc=%s pod=%s err=%s",
info.Namespace, info.ClusterName, store.PodName, err.Error())
return err
}
@@ -162,28 +173,31 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon
Store: store.ID,
})
if err != nil {
- glog.Errorf("failed to truncate the sst file: ns=%s tc=%s store=%s err=%s",
+ klog.Errorf("failed to truncate the sst file: ns=%s tc=%s store=%s err=%s",
info.Namespace, info.ClusterName, store.ID, err.Error())
return err
}
oa.EmitEvent(info, fmt.Sprintf("TruncateSSTFile: tikv: %s/%s", info.Namespace, store.PodName))
// delete tikv pod
- glog.Infof("deleting pod: [%s/%s] again", info.Namespace, store.PodName)
- wait.Poll(10*time.Second, time.Minute, func() (bool, error) {
+ klog.Infof("deleting pod: [%s/%s] again", info.Namespace, store.PodName)
+ err = wait.Poll(10*time.Second, time.Minute, func() (bool, error) {
err = oa.kubeCli.CoreV1().Pods(info.Namespace).Delete(store.PodName, &metav1.DeleteOptions{})
if err != nil {
return false, nil
}
return true, nil
})
+ if err != nil {
+ return err
+ }
tikvOps.SetPoll(DefaultPollInterval, maxStoreDownTime+tikvFailoverPeriod+failoverTimeout)
err = tikvOps.PollTiDBCluster(info.Namespace, info.ClusterName,
func(tc *v1alpha1.TidbCluster, err error) (bool, error) {
_, ok := tc.Status.TiKV.FailureStores[store.ID]
- glog.Infof("cluster: [%s/%s] check if target store failed: %t",
+ klog.Infof("cluster: [%s/%s] check if target store failed: %t",
info.Namespace, info.ClusterName, ok)
if !ok {
return false, nil
@@ -191,13 +205,13 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon
return true, nil
})
if err != nil {
- glog.Errorf("failed to check truncate sst file: %v", err)
+ klog.Errorf("failed to check truncate sst file: %v", err)
return err
}
if err := wait.Poll(1*time.Minute, 30*time.Minute, func() (bool, error) {
if err := tikvOps.RecoverSSTFile(info.Namespace, podName); err != nil {
- glog.Errorf("failed to recovery sst file %s/%s, %v", info.Namespace, podName, err)
+ klog.Errorf("failed to recovery sst file %s/%s, %v", info.Namespace, podName, err)
return false, nil
}
@@ -206,14 +220,40 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailover(info *TidbClusterCon
return err
}
- glog.Infof("deleting pod: [%s/%s] again", info.Namespace, store.PodName)
- return wait.Poll(10*time.Second, time.Minute, func() (bool, error) {
+ klog.Infof("deleting pod: [%s/%s] again", info.Namespace, store.PodName)
+ err = wait.Poll(10*time.Second, time.Minute, func() (bool, error) {
err = oa.kubeCli.CoreV1().Pods(info.Namespace).Delete(store.PodName, &metav1.DeleteOptions{})
if err != nil {
return false, nil
}
return true, nil
})
+ if err != nil {
+ return err
+ }
+
+ // clear failure stores
+ err = wait.Poll(8*time.Second, 10*time.Minute, func() (done bool, err error) {
+ tc, err = oa.cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tc.Status.TiKV.FailureStores == nil || len(tc.Status.TiKV.FailureStores) == 0 {
+ return true, nil
+ }
+ tc.Status.TiKV.FailureStores = nil
+ _, err = oa.cli.PingcapV1alpha1().TidbClusters(info.Namespace).Update(tc)
+ if err != nil {
+ return false, nil
+ }
+ return false, nil
+ })
+ if err != nil {
+ return err
+ }
+ err = oa.CheckTidbClusterStatus(info)
+ if err != nil {
+ return err
+ }
+ klog.Info("TruncateSSTFileThenCheckFailover success")
+ return nil
}
func (oa *operatorActions) TruncateSSTFileThenCheckFailoverOrDie(info *TidbClusterConfig, tikvFailoverPeriod time.Duration) {
@@ -225,14 +265,14 @@ func (oa *operatorActions) TruncateSSTFileThenCheckFailoverOrDie(info *TidbClust
func (oa *operatorActions) CheckFailoverPending(info *TidbClusterConfig, node string, faultPoint *time.Time) (bool, error) {
affectedPods, err := oa.getPodsByNode(info, node)
if err != nil {
- glog.Infof("cluster:[%s] query pods failed,error:%v", info.FullName(), err)
+ klog.Infof("cluster:[%s] query pods failed,error:%v", info.FullName(), err)
return false, nil
}
tc, err := oa.cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{})
if err != nil {
- glog.Infof("pending failover,failed to get tidbcluster:[%s], error: %v", info.FullName(), err)
+ klog.Infof("pending failover,failed to get tidbcluster:[%s], error: %v", info.FullName(), err)
if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
- glog.Info("create new client")
+ klog.Info("create new client")
newCli, _, _, _, _ := client.NewCliOrDie()
oa.cli = newCli
}
@@ -244,7 +284,7 @@ func (oa *operatorActions) CheckFailoverPending(info *TidbClusterConfig, node st
for _, failureMember := range tc.Status.PD.FailureMembers {
if _, exist := affectedPods[failureMember.PodName]; exist {
err := fmt.Errorf("cluster: [%s] the pd member[%s] should be mark failure after %s", info.FullName(), failureMember.PodName, deadline.Format(time.RFC3339))
- glog.Errorf(err.Error())
+ klog.Errorf(err.Error())
return false, err
}
}
@@ -253,7 +293,7 @@ func (oa *operatorActions) CheckFailoverPending(info *TidbClusterConfig, node st
for _, failureStore := range tc.Status.TiKV.FailureStores {
if _, exist := affectedPods[failureStore.PodName]; exist {
err := fmt.Errorf("cluster: [%s] the tikv store[%s] should be mark failure after %s", info.FullName(), failureStore.PodName, deadline.Format(time.RFC3339))
- glog.Errorf(err.Error())
+ klog.Errorf(err.Error())
// There may have been a failover before
return false, nil
}
@@ -264,13 +304,13 @@ func (oa *operatorActions) CheckFailoverPending(info *TidbClusterConfig, node st
for _, failureMember := range tc.Status.TiDB.FailureMembers {
if _, exist := affectedPods[failureMember.PodName]; exist {
err := fmt.Errorf("cluster: [%s] the tidb member[%s] should be mark failure after %s", info.FullName(), failureMember.PodName, deadline.Format(time.RFC3339))
- glog.Errorf(err.Error())
+ klog.Errorf(err.Error())
return false, err
}
}
}
- glog.Infof("cluster: [%s] operator's failover feature is pending", info.FullName())
+ klog.Infof("cluster: [%s] operator's failover feature is pending", info.FullName())
return false, nil
}
return true, nil
@@ -300,18 +340,18 @@ func (oa *operatorActions) CheckFailoverPendingOrDie(clusters []*TidbClusterConf
func (oa *operatorActions) CheckFailover(info *TidbClusterConfig, node string) (bool, error) {
affectedPods, err := oa.getPodsByNode(info, node)
if err != nil {
- glog.Infof("cluster:[%s] query pods failed,error:%v", info.FullName(), err)
+ klog.Infof("cluster:[%s] query pods failed,error:%v", info.FullName(), err)
return false, nil
}
if len(affectedPods) == 0 {
- glog.Infof("the cluster:[%s] can not be affected by node:[%s]", info.FullName(), node)
+ klog.Infof("the cluster:[%s] can not be affected by node:[%s]", info.FullName(), node)
return true, nil
}
tc, err := oa.cli.PingcapV1alpha1().TidbClusters(info.Namespace).Get(info.ClusterName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("query tidbcluster: [%s] failed, error: %v", info.FullName(), err)
+ klog.Errorf("query tidbcluster: [%s] failed, error: %v", info.FullName(), err)
return false, nil
}
@@ -332,19 +372,19 @@ func (oa *operatorActions) CheckFailover(info *TidbClusterConfig, node string) (
}
}
- glog.Infof("cluster: [%s]'s failover feature has complete", info.FullName())
+ klog.Infof("cluster: [%s]'s failover feature has complete", info.FullName())
return true, nil
}
func (oa *operatorActions) getPodsByNode(info *TidbClusterConfig, node string) (map[string]*corev1.Pod, error) {
selector, err := label.New().Instance(info.ClusterName).Selector()
if err != nil {
- glog.Errorf("cluster:[%s] create selector failed, error:%v", info.FullName(), err)
+ klog.Errorf("cluster:[%s] create selector failed, error:%v", info.FullName(), err)
return nil, err
}
pods, err := oa.kubeCli.CoreV1().Pods(info.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
- glog.Errorf("cluster:[%s] query pods failed, error:%v", info.FullName(), err)
+ klog.Errorf("cluster:[%s] query pods failed, error:%v", info.FullName(), err)
return nil, err
}
podsOfNode := map[string]*corev1.Pod{}
@@ -385,23 +425,36 @@ func (oa *operatorActions) CheckRecover(cluster *TidbClusterConfig) (bool, error
}
if tc.Status.PD.FailureMembers != nil && len(tc.Status.PD.FailureMembers) > 0 {
- glog.Infof("cluster: [%s]'s pd FailureMembers is not nil, continue to wait", cluster.FullName())
+ klog.Infof("cluster: [%s]'s pd FailureMembers is not nil, continue to wait", cluster.FullName())
return false, nil
}
if tc.Status.TiDB.FailureMembers != nil && len(tc.Status.TiDB.FailureMembers) > 0 {
- glog.Infof("cluster: [%s]'s tidb FailureMembers is not nil, continue to wait", cluster.FullName())
+ klog.Infof("cluster: [%s]'s tidb FailureMembers is not nil, continue to wait", cluster.FullName())
return false, nil
}
// recover tikv manually
- if tc.Status.TiKV.FailureStores != nil {
+ klog.Infof("recover tikv[%s/%s] failover manually", cluster.Namespace, cluster.ClusterName)
+ err = wait.Poll(5*time.Second, 10*time.Minute, func() (done bool, err error) {
+ tc, err := oa.cli.PingcapV1alpha1().TidbClusters(cluster.Namespace).Get(cluster.ClusterName, metav1.GetOptions{})
+ if err != nil {
+ return false, nil
+ }
+ if tc.Status.TiKV.FailureStores == nil || len(tc.Status.TiKV.FailureStores) < 1 {
+ klog.Infof("tc[%s/%s] 's tikv failover has been recovered", tc.Namespace, tc.Name)
+ return true, nil
+ }
tc.Status.TiKV.FailureStores = nil
- tc, err = oa.cli.PingcapV1alpha1().TidbClusters(cluster.Namespace).Update(tc)
+ _, err = oa.cli.PingcapV1alpha1().TidbClusters(cluster.Namespace).Update(tc)
if err != nil {
- glog.Errorf("failed to set status.tikv.failureStore to nil, %v", err)
+ klog.Errorf("failed to set status.tikv.failureStore to nil, %v", err)
return false, nil
}
+ return false, nil
+ })
+ if err != nil {
+ return false, err
}
return true, nil
@@ -437,13 +490,13 @@ func (oa *operatorActions) pdFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluster)
}
}
if !failure {
- glog.Infof("tidbCluster:[%s/%s]'s member:[%s] have not become failuremember", tc.Namespace, tc.Name, pod.Name)
+ klog.Infof("tidbCluster:[%s/%s]'s member:[%s] have not become failuremember", tc.Namespace, tc.Name, pod.Name)
return false
}
for _, member := range tc.Status.PD.Members {
if member.Name == pod.GetName() {
- glog.Infof("tidbCluster:[%s/%s]'s status.members still have pd member:[%s]", tc.Namespace, tc.Name, pod.Name)
+ klog.Infof("tidbCluster:[%s/%s]'s status.members still have pd member:[%s]", tc.Namespace, tc.Name, pod.Name)
return false
}
}
@@ -452,7 +505,7 @@ func (oa *operatorActions) pdFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluster)
return true
}
- glog.Infof("cluster: [%s/%s] pd:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName())
+ klog.Infof("cluster: [%s/%s] pd:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName())
return false
}
@@ -469,7 +522,7 @@ func (oa *operatorActions) tikvFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluste
}
}
if !failure {
- glog.Infof("tidbCluster:[%s/%s]'s store pod:[%s] have not become failuremember", tc.Namespace, tc.Name, pod.Name)
+ klog.Infof("tidbCluster:[%s/%s]'s store pod:[%s] have not become failuremember", tc.Namespace, tc.Name, pod.Name)
return false
}
@@ -483,7 +536,7 @@ func (oa *operatorActions) tikvFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluste
return true
}
- glog.Infof("cluster: [%s/%s] tikv:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName())
+ klog.Infof("cluster: [%s/%s] tikv:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName())
return false
}
@@ -491,7 +544,7 @@ func (oa *operatorActions) tidbFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluste
failure := false
for _, failureMember := range tc.Status.TiDB.FailureMembers {
if failureMember.PodName == pod.GetName() {
- glog.Infof("tidbCluster:[%s/%s]'s store pod:[%s] have become failuremember", tc.Namespace, tc.Name, pod.Name)
+ klog.Infof("tidbCluster:[%s/%s]'s store pod:[%s] have become failuremember", tc.Namespace, tc.Name, pod.Name)
failure = true
break
}
@@ -510,7 +563,7 @@ func (oa *operatorActions) tidbFailover(pod *corev1.Pod, tc *v1alpha1.TidbCluste
if healthCount == int(tc.Spec.TiDB.Replicas) {
return true
}
- glog.Infof("cluster: [%s/%s] tidb:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName())
+ klog.Infof("cluster: [%s/%s] tidb:[%s] failover still not complete", tc.Namespace, tc.Name, pod.GetName())
return false
}
@@ -555,30 +608,30 @@ func (oa *operatorActions) GetNodeMap(info *TidbClusterConfig, component string)
}
func (oa *operatorActions) CheckKubeletDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig, faultNode string) {
- glog.Infof("check k8s/operator/tidbCluster status when kubelet down")
+ klog.Infof("check k8s/operator/tidbCluster status when kubelet down")
time.Sleep(10 * time.Minute)
KeepOrDie(3*time.Second, 10*time.Minute, func() error {
err := oa.CheckK8sAvailable(nil, nil)
if err != nil {
return err
}
- glog.V(4).Infof("k8s cluster is available.")
+ klog.V(4).Infof("k8s cluster is available.")
err = oa.CheckOperatorAvailable(operatorConfig)
if err != nil {
return err
}
- glog.V(4).Infof("tidb operator is available.")
+ klog.V(4).Infof("tidb operator is available.")
err = oa.CheckTidbClustersAvailable(clusters)
if err != nil {
return err
}
- glog.V(4).Infof("all clusters are available")
+ klog.V(4).Infof("all clusters are available")
return nil
})
}
func (oa *operatorActions) CheckEtcdDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig, faultNode string) {
- glog.Infof("check k8s/operator/tidbCluster status when etcd down")
+ klog.Infof("check k8s/operator/tidbCluster status when etcd down")
// kube-apiserver may block 15 min
time.Sleep(20 * time.Minute)
KeepOrDie(3*time.Second, 10*time.Minute, func() error {
@@ -586,23 +639,23 @@ func (oa *operatorActions) CheckEtcdDownOrDie(operatorConfig *OperatorConfig, cl
if err != nil {
return err
}
- glog.V(4).Infof("k8s cluster is available.")
+ klog.V(4).Infof("k8s cluster is available.")
err = oa.CheckOperatorAvailable(operatorConfig)
if err != nil {
return err
}
- glog.V(4).Infof("tidb operator is available.")
+ klog.V(4).Infof("tidb operator is available.")
err = oa.CheckTidbClustersAvailable(clusters)
if err != nil {
return err
}
- glog.V(4).Infof("all clusters are available")
+ klog.V(4).Infof("all clusters are available")
return nil
})
}
func (oa *operatorActions) CheckKubeProxyDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig) {
- glog.Infof("checking k8s/tidbCluster status when kube-proxy down")
+ klog.Infof("checking k8s/tidbCluster status when kube-proxy down")
KeepOrDie(3*time.Second, 10*time.Minute, func() error {
err := oa.CheckK8sAvailable(nil, nil)
@@ -610,75 +663,75 @@ func (oa *operatorActions) CheckKubeProxyDownOrDie(operatorConfig *OperatorConfi
return err
}
- glog.V(4).Infof("k8s cluster is available.")
+ klog.V(4).Infof("k8s cluster is available.")
err = oa.CheckOperatorAvailable(operatorConfig)
if err != nil {
return err
}
- glog.V(4).Infof("tidb operator is available.")
+ klog.V(4).Infof("tidb operator is available.")
err = oa.CheckTidbClustersAvailable(clusters)
if err != nil {
return err
}
- glog.V(4).Infof("all clusters are available.")
+ klog.V(4).Infof("all clusters are available.")
return nil
})
}
func (oa *operatorActions) CheckKubeSchedulerDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig) {
- glog.Infof("verify kube-scheduler is not avaiavble")
+ klog.Infof("verify kube-scheduler is not avaiavble")
if err := waitForComponentStatus(oa.kubeCli, "scheduler", corev1.ComponentHealthy, corev1.ConditionFalse); err != nil {
slack.NotifyAndPanic(fmt.Errorf("failed to stop kube-scheduler: %v", err))
}
- glog.Infof("checking operator/tidbCluster status when kube-scheduler is not available")
+ klog.Infof("checking operator/tidbCluster status when kube-scheduler is not available")
KeepOrDie(3*time.Second, 10*time.Minute, func() error {
err := oa.CheckOperatorAvailable(operatorConfig)
if err != nil {
return err
}
- glog.V(4).Infof("tidb operator is available.")
+ klog.V(4).Infof("tidb operator is available.")
err = oa.CheckTidbClustersAvailable(clusters)
if err != nil {
return err
}
- glog.V(4).Infof("all clusters are available.")
+ klog.V(4).Infof("all clusters are available.")
return nil
})
}
func (oa *operatorActions) CheckKubeControllerManagerDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig) {
- glog.Infof("verify kube-controller-manager is not avaiavble")
+ klog.Infof("verify kube-controller-manager is not avaiavble")
if err := waitForComponentStatus(oa.kubeCli, "controller-manager", corev1.ComponentHealthy, corev1.ConditionFalse); err != nil {
slack.NotifyAndPanic(fmt.Errorf("failed to stop kube-controller-manager: %v", err))
}
- glog.Infof("checking operator/tidbCluster status when kube-controller-manager is not available")
+ klog.Infof("checking operator/tidbCluster status when kube-controller-manager is not available")
KeepOrDie(3*time.Second, 10*time.Minute, func() error {
err := oa.CheckOperatorAvailable(operatorConfig)
if err != nil {
return err
}
- glog.V(4).Infof("tidb operator is available.")
+ klog.V(4).Infof("tidb operator is available.")
err = oa.CheckTidbClustersAvailable(clusters)
if err != nil {
return err
}
- glog.V(4).Infof("all clusters are available.")
+ klog.V(4).Infof("all clusters are available.")
return nil
})
}
func (oa *operatorActions) CheckOneApiserverDownOrDie(operatorConfig *OperatorConfig, clusters []*TidbClusterConfig, faultNode string) {
- glog.Infof("check k8s/operator/tidbCluster status when one apiserver down")
+ klog.Infof("check k8s/operator/tidbCluster status when one apiserver down")
affectedPods := map[string]*corev1.Pod{}
apiserverPod, err := GetKubeApiserverPod(oa.kubeCli, faultNode)
if err != nil {
@@ -724,17 +777,17 @@ func (oa *operatorActions) CheckOneApiserverDownOrDie(operatorConfig *OperatorCo
if err != nil {
return err
}
- glog.V(4).Infof("k8s cluster is available.")
+ klog.V(4).Infof("k8s cluster is available.")
err = oa.CheckOperatorAvailable(operatorConfig)
if err != nil {
return err
}
- glog.V(4).Infof("tidb operator is available.")
+ klog.V(4).Infof("tidb operator is available.")
err = oa.CheckTidbClustersAvailable(clusters)
if err != nil {
return err
}
- glog.V(4).Infof("all clusters is available")
+ klog.V(4).Infof("all clusters is available")
return nil
})
}
@@ -745,13 +798,13 @@ func (oa *operatorActions) CheckAllApiserverDownOrDie(operatorConfig *OperatorCo
if err != nil {
return err
}
- glog.V(4).Infof("all clusters is available")
+ klog.V(4).Infof("all clusters is available")
return nil
})
}
func (oa *operatorActions) CheckOperatorDownOrDie(clusters []*TidbClusterConfig) {
- glog.Infof("checking k8s/tidbCluster status when operator down")
+ klog.Infof("checking k8s/tidbCluster status when operator down")
KeepOrDie(3*time.Second, 10*time.Minute, func() error {
err := oa.CheckK8sAvailable(nil, nil)
@@ -770,10 +823,10 @@ func (oa *operatorActions) CheckK8sAvailableOrDie(excludeNodes map[string]string
}
func (oa *operatorActions) CheckK8sAvailable(excludeNodes map[string]string, excludePods map[string]*corev1.Pod) error {
- return wait.Poll(3*time.Second, time.Minute, func() (bool, error) {
+ return wait.Poll(3*time.Second, 10*time.Minute, func() (bool, error) {
nodes, err := oa.kubeCli.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
- glog.Errorf("failed to list nodes,error:%v", err)
+ klog.Errorf("failed to list nodes,error:%v", err)
return false, nil
}
for _, node := range nodes.Items {
@@ -782,13 +835,14 @@ func (oa *operatorActions) CheckK8sAvailable(excludeNodes map[string]string, exc
}
for _, condition := range node.Status.Conditions {
if condition.Type == corev1.NodeReady && condition.Status != corev1.ConditionTrue {
- return false, fmt.Errorf("node: [%s] is not in running", node.GetName())
+ klog.Infof("node[%s] is not running, condition[%v] status is %v", node.GetName(), condition.Type, condition.Status)
+ return false, nil
}
}
}
systemPods, err := oa.kubeCli.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
if err != nil {
- glog.Errorf("failed to list kube-system pods,error:%v", err)
+ klog.Errorf("failed to list kube-system pods,error:%v", err)
return false, nil
}
for _, pod := range systemPods.Items {
@@ -813,23 +867,23 @@ func (oa *operatorActions) CheckOperatorAvailable(operatorConfig *OperatorConfig
}
controllerDeployment, err := oa.kubeCli.AppsV1().Deployments(operatorConfig.Namespace).Get(tidbControllerName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get deployment:%s failed,error:%v", tidbControllerName, err)
+ klog.Errorf("failed to get deployment:%s failed,error:%v", tidbControllerName, err)
return false, nil
}
if controllerDeployment.Status.AvailableReplicas != *controllerDeployment.Spec.Replicas {
e = fmt.Errorf("the %s is not available", tidbControllerName)
- glog.Error(e)
+ klog.Error(e)
errCount++
return false, nil
}
schedulerDeployment, err := oa.kubeCli.AppsV1().Deployments(operatorConfig.Namespace).Get(tidbSchedulerName, metav1.GetOptions{})
if err != nil {
- glog.Errorf("failed to get deployment:%s failed,error:%v", tidbSchedulerName, err)
+ klog.Errorf("failed to get deployment:%s failed,error:%v", tidbSchedulerName, err)
return false, nil
}
if schedulerDeployment.Status.AvailableReplicas != *schedulerDeployment.Spec.Replicas {
e = fmt.Errorf("the %s is not available", tidbSchedulerName)
- glog.Error(e)
+ klog.Error(e)
errCount++
return false, nil
}
@@ -864,32 +918,54 @@ var testTableName = "testTable"
func (oa *operatorActions) addDataToCluster(info *TidbClusterConfig) (bool, error) {
dsn, cancel, err := oa.getTiDBDSN(info.Namespace, info.ClusterName, "test", info.Password)
if err != nil {
- glog.Errorf("failed to get TiDB DSN: %v", err)
+ klog.Errorf("failed to get TiDB DSN: %v", err)
return false, nil
}
defer cancel()
db, err := sql.Open("mysql", dsn)
if err != nil {
- glog.Errorf("cluster:[%s] can't open connection to mysql: %v", info.FullName(), err)
+ klog.Errorf("cluster:[%s] can't open connection to mysql: %v", info.FullName(), err)
return false, nil
}
defer db.Close()
_, err = db.Exec(fmt.Sprintf("CREATE TABLE %s (name VARCHAR(64))", testTableName))
if err != nil && !tableAlreadyExist(err) {
- glog.Errorf("cluster:[%s] can't create table to mysql: %v", info.FullName(), err)
+ klog.Errorf("cluster:[%s] can't create table to mysql: %v", info.FullName(), err)
return false, nil
}
_, err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES (?)", testTableName), "testValue")
if err != nil {
- glog.Errorf("cluster:[%s] can't insert data to mysql: %v", info.FullName(), err)
+ klog.Errorf("cluster:[%s] can't insert data to mysql: %v", info.FullName(), err)
return false, nil
}
return true, nil
}
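+// WaitPodOnNodeReadyOrDie waits until every pod of the given clusters scheduled on faultNode is ready again, and panics via slack notification on timeout.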
+func (oa *operatorActions) WaitPodOnNodeReadyOrDie(clusters []*TidbClusterConfig, faultNode string) {
+ err := wait.Poll(1*time.Minute, 60*time.Minute, func() (bool, error) {
+ for _, cluster := range clusters {
+ pods, err := oa.getPodsByNode(cluster, faultNode)
+ if err != nil {
+ return false, nil
+ }
+ for _, pod := range pods {
+ klog.Infof("start to check whether pod[%s/%s] is ready on node[%s]", pod.Namespace, pod.Name, faultNode)
+ ready := podutil.IsPodReady(pod)
+ if !ready {
+ return false, nil
+ }
+ }
+ }
+ return true, nil
+ })
+ if err != nil {
+ slack.NotifyAndPanic(fmt.Errorf("failed to wait pod ready on restarted node"))
+ }
+}
+
func GetPodStatus(pod *corev1.Pod) string {
reason := string(pod.Status.Phase)
if pod.Status.Reason != "" {
diff --git a/tests/fault.go b/tests/fault.go
index 33298ad436..702383111c 100644
--- a/tests/fault.go
+++ b/tests/fault.go
@@ -32,7 +32,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -95,7 +95,7 @@ type faultTriggerActions struct {
}
func (fa *faultTriggerActions) CheckAndRecoverEnv() error {
- glog.Infof("ensure all nodes are running")
+ klog.Infof("ensure all nodes are running")
for _, physicalNode := range fa.cfg.Nodes {
for _, vNode := range physicalNode.Nodes {
err := fa.StartNode(physicalNode.PhysicalNode, vNode.IP)
@@ -104,21 +104,21 @@ func (fa *faultTriggerActions) CheckAndRecoverEnv() error {
}
}
}
- glog.Infof("ensure all etcds are running")
+ klog.Infof("ensure all etcds are running")
err := fa.StartETCD()
if err != nil {
return err
}
allK8sNodes := getAllK8sNodes(fa.cfg)
- glog.Infof("ensure all kubelets are running")
+ klog.Infof("ensure all kubelets are running")
for _, node := range allK8sNodes {
err := fa.StartKubelet(node)
if err != nil {
return err
}
}
- glog.Infof("ensure all static pods are running")
+ klog.Infof("ensure all static pods are running")
for _, physicalNode := range fa.cfg.APIServers {
for _, vNode := range physicalNode.Nodes {
err := fa.StartKubeAPIServer(vNode.IP)
@@ -135,7 +135,7 @@ func (fa *faultTriggerActions) CheckAndRecoverEnv() error {
}
}
}
- glog.Infof("ensure all kube-proxy are running")
+ klog.Infof("ensure all kube-proxy are running")
err = fa.StartKubeProxy()
if err != nil {
return err
@@ -146,7 +146,7 @@ func (fa *faultTriggerActions) CheckAndRecoverEnv() error {
func (fa *faultTriggerActions) CheckAndRecoverEnvOrDie() {
if err := fa.CheckAndRecoverEnv(); err != nil {
- glog.Fatal(err)
+ klog.Fatal(err)
}
}
@@ -156,7 +156,7 @@ func (fa *faultTriggerActions) StopNode() (string, string, time.Time, error) {
if err != nil {
return "", "", now, err
}
- glog.Infof("selecting %s as the node to failover", node)
+ klog.Infof("selecting %s as the node to failover", node)
physicalNode := getPhysicalNode(node, fa.cfg)
@@ -176,11 +176,11 @@ func (fa *faultTriggerActions) StopNode() (string, string, time.Time, error) {
if err := faultCli.StopVM(&manager.VM{
Name: name,
}); err != nil {
- glog.Errorf("failed to stop node %s on physical node: %s: %v", node, physicalNode, err)
+ klog.Errorf("failed to stop node %s on physical node: %s: %v", node, physicalNode, err)
return "", "", now, err
}
- glog.Infof("node %s on physical node %s is stopped", node, physicalNode)
+ klog.Infof("node %s on physical node %s is stopped", node, physicalNode)
return physicalNode, node, now, nil
}
@@ -216,11 +216,11 @@ func (fa *faultTriggerActions) StartNode(physicalNode string, node string) error
if err := faultCli.StartVM(&manager.VM{
Name: name,
}); err != nil {
- glog.Errorf("failed to start node %s on physical node %s: %v", node, physicalNode, err)
+ klog.Errorf("failed to start node %s on physical node %s: %v", node, physicalNode, err)
return err
}
- glog.Infof("node %s on physical node %s is started", node, physicalNode)
+ klog.Infof("node %s on physical node %s is started", node, physicalNode)
return nil
}
@@ -244,7 +244,7 @@ func (fa *faultTriggerActions) getAllKubeProxyPods() ([]v1.Pod, error) {
// StopKubeProxy stops the kube-proxy service.
func (fa *faultTriggerActions) StopKubeProxy() error {
- glog.Infof("stopping all kube-proxy pods")
+ klog.Infof("stopping all kube-proxy pods")
nodes := getAllK8sNodes(fa.cfg)
pods, err := fa.getAllKubeProxyPods()
if err != nil {
@@ -277,13 +277,13 @@ func (fa *faultTriggerActions) StopKubeProxy() error {
return err
}
for _, pod := range pods {
- glog.Infof("waiting for kube-proxy pod %s/%s to be terminated", pod.Namespace, pod.Name)
+ klog.Infof("waiting for kube-proxy pod %s/%s to be terminated", pod.Namespace, pod.Name)
err = waitForPodNotFoundInNamespace(fa.kubeCli, pod.Name, pod.Namespace, PodTimeout)
if err != nil {
return err
}
}
- glog.Infof("kube-proxy on vm nodes %v are stopped", nodes)
+ klog.Infof("kube-proxy on vm nodes %v are stopped", nodes)
return nil
}
@@ -295,7 +295,7 @@ func (fa *faultTriggerActions) StopKubeProxyOrDie() {
// StartKubeProxy starts the kube-proxy service.
func (fa *faultTriggerActions) StartKubeProxy() error {
- glog.Infof("starting all kube-proxy pods")
+ klog.Infof("starting all kube-proxy pods")
nodes := getAllK8sNodes(fa.cfg)
ds, err := fa.kubeCli.AppsV1().DaemonSets(metav1.NamespaceSystem).Get("kube-proxy", metav1.GetOptions{})
if err != nil {
@@ -327,7 +327,7 @@ func (fa *faultTriggerActions) StartKubeProxy() error {
if err != nil {
return err
}
- glog.Infof("kube-proxy on vm nodes %v are started", nodes)
+ klog.Infof("kube-proxy on vm nodes %v are started", nodes)
return nil
}
@@ -356,7 +356,7 @@ func (fa *faultTriggerActions) StopETCD(nodes ...string) error {
}
func (fa *faultTriggerActions) StopETCDOrDie(nodes ...string) {
- glog.Infof("stopping %v etcds", nodes)
+ klog.Infof("stopping %v etcds", nodes)
if err := fa.StopETCD(nodes...); err != nil {
slack.NotifyAndPanic(err)
}
@@ -380,7 +380,7 @@ func (fa *faultTriggerActions) StopKubelet(nodes ...string) error {
}
func (fa *faultTriggerActions) StopKubeletOrDie(nodes ...string) {
- glog.Infof("stopping %v kubelets", nodes)
+ klog.Infof("stopping %v kubelets", nodes)
if err := fa.StopKubelet(nodes...); err != nil {
slack.NotifyAndPanic(err)
}
@@ -553,11 +553,11 @@ func (fa *faultTriggerActions) serviceAction(node string, serverName string, act
}
if err != nil {
- glog.Errorf("failed to %s %s %s: %v", action, serverName, node, err)
+ klog.Errorf("failed to %s %s %s: %v", action, serverName, node, err)
return err
}
- glog.Infof("%s %s %s successfully", action, serverName, node)
+ klog.Infof("%s %s %s successfully", action, serverName, node)
return nil
}
@@ -576,7 +576,7 @@ func getFaultNode(kubeCli kubernetes.Interface) (string, error) {
err = wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) {
nodes, err = kubeCli.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
- glog.Errorf("trigger node stop failed when get all nodes, error: %v", err)
+ klog.Errorf("trigger node stop failed when get all nodes, error: %v", err)
return false, nil
}
@@ -584,7 +584,7 @@ func getFaultNode(kubeCli kubernetes.Interface) (string, error) {
})
if err != nil {
- glog.Errorf("failed to list nodes: %v", err)
+ klog.Errorf("failed to list nodes: %v", err)
return "", err
}
@@ -592,27 +592,35 @@ func getFaultNode(kubeCli kubernetes.Interface) (string, error) {
return "", fmt.Errorf("the number of nodes cannot be less than 1")
}
- myNode := getMyNodeName()
-
- index := rand.Intn(len(nodes.Items))
- faultNode := nodes.Items[index].Name
- if faultNode != myNode {
- return faultNode, nil
+ listOption := metav1.ListOptions{
+ LabelSelector: labels.SelectorFromSet(map[string]string{
+ "app": "helm",
+ "name": "tiller",
+ }).String(),
+ }
+ pods, err := kubeCli.CoreV1().Pods("kube-system").List(listOption)
+ if err != nil {
+ return "", err
}
+ if len(pods.Items) < 1 {
+ return "", fmt.Errorf("failed to get tiller pods")
+ }
+ tillerNodeName := pods.Items[0].Spec.NodeName
+ myNode := getMyNodeName()
- if index == 0 {
- faultNode = nodes.Items[index+1].Name
- } else {
- faultNode = nodes.Items[index-1].Name
+ var filterNodes []string
+ for _, node := range nodes.Items {
+ if node.Name != myNode && node.Name != tillerNodeName {
+ filterNodes = append(filterNodes, node.Name)
+ }
}
- if faultNode == myNode {
- err := fmt.Errorf("there are at least two nodes with the name %s", myNode)
- glog.Error(err.Error())
- return "", err
+ if filterNodes == nil || len(filterNodes) < 1 {
+ return "", fmt.Errorf("no nodes filtered after selecting nodes and filter the tiller and stabiltiy pod")
}
- return faultNode, nil
+ index := rand.Intn(len(filterNodes))
+ return filterNodes[index], nil
}
func getPhysicalNode(faultNode string, cfg *Config) string {
diff --git a/tests/images/e2e/Dockerfile b/tests/images/e2e/Dockerfile
index 260ec91cf9..cb3ba91562 100644
--- a/tests/images/e2e/Dockerfile
+++ b/tests/images/e2e/Dockerfile
@@ -1,9 +1,11 @@
-FROM alpine:3.10
+FROM debian:buster-slim
ENV KUBECTL_VERSION=v1.12.2
ENV HELM_VERSION=v2.9.1
-RUN apk update && apk add --no-cache ca-certificates curl git openssl bash mysql-client
+RUN apt-get update && \
+ apt-get install -y ca-certificates curl git openssl default-mysql-client unzip && \
+ apt-get install -y python # required by gcloud
RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl \
-o /usr/local/bin/kubectl && \
chmod +x /usr/local/bin/kubectl && \
@@ -13,6 +15,9 @@ RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VER
mv linux-amd64/helm /usr/local/bin/helm && \
rm -rf linux-amd64 && \
rm helm-${HELM_VERSION}-linux-amd64.tar.gz
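+# Install the AWS CLI v2.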
+RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
+ unzip awscliv2.zip && \
+ ./aws/install
ADD tidb-operator /charts/e2e/tidb-operator
ADD tidb-cluster /charts/e2e/tidb-cluster
@@ -24,3 +29,6 @@ ADD bin/e2e.test /usr/local/bin/
ADD bin/webhook /usr/local/bin/
ADD bin/blockwriter /usr/local/bin/
ADD bin/apiserver /usr/local/bin/
+
+ADD entrypoint.sh /usr/local/bin
+ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
diff --git a/tests/images/e2e/entrypoint.sh b/tests/images/e2e/entrypoint.sh
new file mode 100755
index 0000000000..820c594a79
--- /dev/null
+++ b/tests/images/e2e/entrypoint.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -e
+
+# Add default command if no command provided or the first argument is an
+# option.
+if [ $# -lt 1 -o "${1:0:1}" = '-' ]; then
+ set -- /usr/local/bin/ginkgo "$@"
+fi
+
+# If google-cloud-sdk is detected, add it to PATH and disable interactive prompts.
+if [ -d /google-cloud-sdk ]; then
+ source /google-cloud-sdk/path.bash.inc
+ export CLOUDSDK_CORE_DISABLE_PROMPTS=1
+fi
+
+exec "$@"
diff --git a/tests/images/test-apiserver/Dockerfile b/tests/images/test-apiserver/Dockerfile
deleted file mode 100644
index 98222bfd8e..0000000000
--- a/tests/images/test-apiserver/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM alpine:3.10
-
-ADD bin/tidb-apiserver /usr/local/bin/tidb-apiserver
diff --git a/tests/manifests/stability/stability.yaml b/tests/manifests/stability/stability.yaml
index d2d55b2f8d..c9d8671118 100644
--- a/tests/manifests/stability/stability.yaml
+++ b/tests/manifests/stability/stability.yaml
@@ -47,8 +47,8 @@ spec:
command:
- /usr/local/bin/stability-test
- --config=/etc/tidb-operator-stability/config.yaml
- - --operator-image=pingcap/tidb-operator:v1.1.0-beta.1
- - --operator-tag=v1.1.0-beta.1
+ - --operator-image=pingcap/tidb-operator:v1.1.0-rc.2
+ - --operator-tag=v1.1.0-rc.2
- --slack-webhook-url=""
volumeMounts:
- mountPath: /logDir
diff --git a/tests/pkg/apimachinery/certs.go b/tests/pkg/apimachinery/certs.go
index 5061b5d836..605c59cd3f 100644
--- a/tests/pkg/apimachinery/certs.go
+++ b/tests/pkg/apimachinery/certs.go
@@ -20,7 +20,7 @@ import (
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
- glog "k8s.io/klog"
+ "k8s.io/klog"
"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
)
@@ -35,32 +35,32 @@ type CertContext struct {
func SetupServerCert(namespaceName, serviceName string) (*CertContext, error) {
certDir, err := ioutil.TempDir("", "test-e2e-server-cert")
if err != nil {
- glog.Errorf("Failed to create a temp dir for cert generation %v", err)
+ klog.Errorf("Failed to create a temp dir for cert generation %v", err)
return nil, err
}
defer os.RemoveAll(certDir)
signingKey, err := pkiutil.NewPrivateKey()
if err != nil {
- glog.Errorf("Failed to create CA private key %v", err)
+ klog.Errorf("Failed to create CA private key %v", err)
return nil, err
}
signingCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "e2e-server-cert-ca"}, signingKey)
if err != nil {
- glog.Errorf("Failed to create CA cert for apiserver %v", err)
+ klog.Errorf("Failed to create CA cert for apiserver %v", err)
return nil, err
}
caCertFile, err := ioutil.TempFile(certDir, "ca.crt")
if err != nil {
- glog.Errorf("Failed to create a temp file for ca cert generation %v", err)
+ klog.Errorf("Failed to create a temp file for ca cert generation %v", err)
return nil, err
}
if err := ioutil.WriteFile(caCertFile.Name(), pkiutil.EncodeCertPEM(signingCert), 0644); err != nil {
- glog.Errorf("Failed to write CA cert %v", err)
+ klog.Errorf("Failed to write CA cert %v", err)
return nil, err
}
key, err := pkiutil.NewPrivateKey()
if err != nil {
- glog.Errorf("Failed to create private key for %v", err)
+ klog.Errorf("Failed to create private key for %v", err)
return nil, err
}
signedCert, err := pkiutil.NewSignedCert(
@@ -71,21 +71,21 @@ func SetupServerCert(namespaceName, serviceName string) (*CertContext, error) {
key, signingCert, signingKey,
)
if err != nil {
- glog.Errorf("Failed to create cert%v", err)
+ klog.Errorf("Failed to create cert%v", err)
return nil, err
}
certFile, err := ioutil.TempFile(certDir, "server.crt")
if err != nil {
- glog.Errorf("Failed to create a temp file for cert generation %v", err)
+ klog.Errorf("Failed to create a temp file for cert generation %v", err)
return nil, err
}
keyFile, err := ioutil.TempFile(certDir, "server.key")
if err != nil {
- glog.Errorf("Failed to create a temp file for key generation %v", err)
+ klog.Errorf("Failed to create a temp file for key generation %v", err)
return nil, err
}
if err = ioutil.WriteFile(certFile.Name(), pkiutil.EncodeCertPEM(signedCert), 0600); err != nil {
- glog.Errorf("Failed to write cert file %v", err)
+ klog.Errorf("Failed to write cert file %v", err)
return nil, err
}
keyPEM, err := keyutil.MarshalPrivateKeyToPEM(key)
@@ -93,7 +93,7 @@ func SetupServerCert(namespaceName, serviceName string) (*CertContext, error) {
return nil, err
}
if err = ioutil.WriteFile(keyFile.Name(), keyPEM, 0644); err != nil {
- glog.Errorf("Failed to write key file %v", err)
+ klog.Errorf("Failed to write key file %v", err)
return nil, err
}
return &CertContext{
diff --git a/tests/pkg/blockwriter/blockwriter.go b/tests/pkg/blockwriter/blockwriter.go
index 00eba3e284..a19c96133e 100644
--- a/tests/pkg/blockwriter/blockwriter.go
+++ b/tests/pkg/blockwriter/blockwriter.go
@@ -25,7 +25,7 @@ import (
"github.com/pingcap/tidb-operator/tests/pkg/util"
"k8s.io/apimachinery/pkg/util/wait"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -96,7 +96,7 @@ func (c *BlockWriterCase) newBlockWriter() *blockWriter {
func (c *BlockWriterCase) generateQuery(ctx context.Context, queryChan chan []string, wg *sync.WaitGroup) {
defer func() {
- glog.Infof("[%s] [%s] [action: generate Query] stopped", c, c.ClusterName)
+ klog.Infof("[%s] [%s] [action: generate Query] stopped", c, c.ClusterName)
wg.Done()
}()
@@ -126,7 +126,7 @@ func (c *BlockWriterCase) generateQuery(ctx context.Context, queryChan chan []st
case queryChan <- querys:
continue
default:
- glog.V(4).Infof("[%s] [%s] [action: generate Query] query channel is full, sleep 10 seconds", c, c.ClusterName)
+ klog.V(4).Infof("[%s] [%s] [action: generate Query] query channel is full, sleep 10 seconds", c, c.ClusterName)
util.Sleep(ctx, 10*time.Second)
}
}
@@ -135,7 +135,7 @@ func (c *BlockWriterCase) generateQuery(ctx context.Context, queryChan chan []st
func (bw *blockWriter) batchExecute(db *sql.DB, query string) error {
_, err := db.Exec(query)
if err != nil {
- glog.V(4).Infof("exec sql [%s] failed, err: %v", query, err)
+ klog.V(4).Infof("exec sql [%s] failed, err: %v", query, err)
return err
}
@@ -143,7 +143,7 @@ func (bw *blockWriter) batchExecute(db *sql.DB, query string) error {
}
func (bw *blockWriter) run(ctx context.Context, db *sql.DB, queryChan chan []string) {
- defer glog.Infof("run stopped")
+ defer klog.Infof("run stopped")
for {
select {
case <-ctx.Done():
@@ -163,7 +163,7 @@ func (bw *blockWriter) run(ctx context.Context, db *sql.DB, queryChan chan []str
return
default:
if err := bw.batchExecute(db, query); err != nil {
- glog.V(4).Info(err)
+ klog.V(4).Info(err)
time.Sleep(5 * time.Second)
continue
}
@@ -174,10 +174,10 @@ func (bw *blockWriter) run(ctx context.Context, db *sql.DB, queryChan chan []str
// Initialize inits case
func (c *BlockWriterCase) initialize(db *sql.DB) error {
- glog.Infof("[%s] [%s] start to init...", c, c.ClusterName)
+ klog.Infof("[%s] [%s] start to init...", c, c.ClusterName)
defer func() {
atomic.StoreUint32(&c.isInit, 1)
- glog.Infof("[%s] [%s] init end...", c, c.ClusterName)
+ klog.Infof("[%s] [%s] init end...", c, c.ClusterName)
}()
for i := 0; i < c.cfg.TableNum; i++ {
@@ -196,7 +196,7 @@ func (c *BlockWriterCase) initialize(db *sql.DB) error {
err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
_, err := db.Exec(tmt)
if err != nil {
- glog.Warningf("[%s] exec sql [%s] failed, err: %v, retry...", c, tmt, err)
+ klog.Warningf("[%s] exec sql [%s] failed, err: %v, retry...", c, tmt, err)
return false, nil
}
@@ -204,7 +204,7 @@ func (c *BlockWriterCase) initialize(db *sql.DB) error {
})
if err != nil {
- glog.Errorf("[%s] exec sql [%s] failed, err: %v", c, tmt, err)
+ klog.Errorf("[%s] exec sql [%s] failed, err: %v", c, tmt, err)
return err
}
}
@@ -216,13 +216,13 @@ func (c *BlockWriterCase) initialize(db *sql.DB) error {
func (c *BlockWriterCase) Start(db *sql.DB) error {
if !atomic.CompareAndSwapUint32(&c.isRunning, 0, 1) {
err := fmt.Errorf("[%s] [%s] is running, you can't start it again", c, c.ClusterName)
- glog.Error(err)
+ klog.Error(err)
return nil
}
defer func() {
c.RLock()
- glog.Infof("[%s] [%s] stopped", c, c.ClusterName)
+ klog.Infof("[%s] [%s] stopped", c, c.ClusterName)
atomic.SwapUint32(&c.isRunning, 0)
}()
@@ -232,7 +232,7 @@ func (c *BlockWriterCase) Start(db *sql.DB) error {
}
}
- glog.Infof("[%s] [%s] start to execute case...", c, c.ClusterName)
+ klog.Infof("[%s] [%s] start to execute case...", c, c.ClusterName)
var wg sync.WaitGroup
@@ -255,7 +255,7 @@ loop:
for {
select {
case <-c.stopChan:
- glog.Infof("[%s] stoping...", c)
+ klog.Infof("[%s] stoping...", c)
cancel()
break loop
default:
diff --git a/tests/pkg/client/client_test.go b/tests/pkg/client/client_test.go
index 7ac4b78e3c..905d2a77a2 100644
--- a/tests/pkg/client/client_test.go
+++ b/tests/pkg/client/client_test.go
@@ -18,7 +18,7 @@ import (
fclient "github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/client"
"github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
func TestClientConn(t *testing.T) {
@@ -29,6 +29,6 @@ func TestClientConn(t *testing.T) {
if err := faultCli.StopVM(&manager.VM{
Name: "105",
}); err != nil {
- glog.Errorf("failed to start node on physical node %v", err)
+ klog.Errorf("failed to start node on physical node %v", err)
}
}
diff --git a/tests/pkg/fault-trigger/api/response.go b/tests/pkg/fault-trigger/api/response.go
index a542246661..da76b3a614 100644
--- a/tests/pkg/fault-trigger/api/response.go
+++ b/tests/pkg/fault-trigger/api/response.go
@@ -31,7 +31,7 @@ import (
"net/http"
"github.com/juju/errors"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// Response defines a new response struct for http
@@ -71,7 +71,7 @@ func ExtractResponse(data []byte) ([]byte, error) {
if respData.StatusCode != http.StatusOK {
d, err := json.Marshal(respData.Payload)
if err != nil {
- glog.Errorf("marshal data failed %v", d)
+ klog.Errorf("marshal data failed %v", d)
}
return d, errors.New(respData.Message)
diff --git a/tests/pkg/fault-trigger/api/server.go b/tests/pkg/fault-trigger/api/server.go
index 48b3cfb078..cb39aa0f32 100644
--- a/tests/pkg/fault-trigger/api/server.go
+++ b/tests/pkg/fault-trigger/api/server.go
@@ -30,7 +30,7 @@ import (
restful "github.com/emicklei/go-restful"
"github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// Server is a web service to control fault trigger
@@ -54,8 +54,8 @@ func (s *Server) StartServer() {
restful.Add(ws)
- glog.Infof("starting fault-trigger server, listening on 0.0.0.0:%d", s.port)
- glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", s.port), nil))
+ klog.Infof("starting fault-trigger server, listening on 0.0.0.0:%d", s.port)
+ klog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", s.port), nil))
}
func (s *Server) listVMs(req *restful.Request, resp *restful.Response) {
@@ -64,7 +64,7 @@ func (s *Server) listVMs(req *restful.Request, resp *restful.Response) {
if err != nil {
res.message(err.Error()).statusCode(http.StatusInternalServerError)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, methods: listVMs, error: %v", err)
+ klog.Errorf("failed to response, methods: listVMs, error: %v", err)
}
return
}
@@ -72,7 +72,7 @@ func (s *Server) listVMs(req *restful.Request, resp *restful.Response) {
res.payload(vms).statusCode(http.StatusOK)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, method: listVMs, error: %v", err)
+ klog.Errorf("failed to response, method: listVMs, error: %v", err)
}
}
@@ -85,7 +85,7 @@ func (s *Server) startVM(req *restful.Request, resp *restful.Response) {
res.message(fmt.Sprintf("failed to get vm %s, error: %v", name, err)).
statusCode(http.StatusInternalServerError)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, methods: startVM, error: %v", err)
+ klog.Errorf("failed to response, methods: startVM, error: %v", err)
}
return
}
@@ -93,7 +93,7 @@ func (s *Server) startVM(req *restful.Request, resp *restful.Response) {
if targetVM == nil {
res.message(fmt.Sprintf("vm %s not found", name)).statusCode(http.StatusNotFound)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, methods: startVM, error: %v", err)
+ klog.Errorf("failed to response, methods: startVM, error: %v", err)
}
return
}
@@ -110,7 +110,7 @@ func (s *Server) stopVM(req *restful.Request, resp *restful.Response) {
res.message(fmt.Sprintf("failed to get vm %s, error: %v", name, err)).
statusCode(http.StatusInternalServerError)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, methods: stopVM, error: %v", err)
+ klog.Errorf("failed to response, methods: stopVM, error: %v", err)
}
return
}
@@ -118,7 +118,7 @@ func (s *Server) stopVM(req *restful.Request, resp *restful.Response) {
if targetVM == nil {
res.message(fmt.Sprintf("vm %s not found", name)).statusCode(http.StatusNotFound)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, methods: stopVM, error: %v", err)
+ klog.Errorf("failed to response, methods: stopVM, error: %v", err)
}
return
}
@@ -178,7 +178,7 @@ func (s *Server) action(
res.message(fmt.Sprintf("failed to %s, error: %v", method, err)).
statusCode(http.StatusInternalServerError)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, methods: %s, error: %v", method, err)
+ klog.Errorf("failed to response, methods: %s, error: %v", method, err)
}
return
}
@@ -186,7 +186,7 @@ func (s *Server) action(
res.message("OK").statusCode(http.StatusOK)
if err := resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, method: %s, error: %v", method, err)
+ klog.Errorf("failed to response, method: %s, error: %v", method, err)
}
}
@@ -202,7 +202,7 @@ func (s *Server) vmAction(
res.message(fmt.Sprintf("failed to %s vm: %s, error: %v", method, targetVM.Name, err)).
statusCode(http.StatusInternalServerError)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, methods: %s, error: %v", method, err)
+ klog.Errorf("failed to response, methods: %s, error: %v", method, err)
}
return
}
@@ -210,7 +210,7 @@ func (s *Server) vmAction(
res.message("OK").statusCode(http.StatusOK)
if err := resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, method: %s, error: %v", method, err)
+ klog.Errorf("failed to response, method: %s, error: %v", method, err)
}
}
@@ -226,7 +226,7 @@ func (s *Server) kubeProxyAction(
res.message(fmt.Sprintf("failed to invoke %s, nodeName: %s, error: %v", method, nodeName, err)).
statusCode(http.StatusInternalServerError)
if err = resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, methods: %s, error: %v", method, err)
+ klog.Errorf("failed to response, methods: %s, error: %v", method, err)
}
return
}
@@ -234,7 +234,7 @@ func (s *Server) kubeProxyAction(
res.message("OK").statusCode(http.StatusOK)
if err := resp.WriteEntity(res); err != nil {
- glog.Errorf("failed to response, method: %s, error: %v", method, err)
+ klog.Errorf("failed to response, method: %s, error: %v", method, err)
}
}
diff --git a/tests/pkg/fault-trigger/client/client.go b/tests/pkg/fault-trigger/client/client.go
index 2a8bac3a1b..2a6d151d16 100644
--- a/tests/pkg/fault-trigger/client/client.go
+++ b/tests/pkg/fault-trigger/client/client.go
@@ -23,7 +23,7 @@ import (
"github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/api"
"github.com/pingcap/tidb-operator/tests/pkg/fault-trigger/manager"
"github.com/pingcap/tidb-operator/tests/pkg/util"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// Client is a fault-trigger client
@@ -153,7 +153,7 @@ func (c *client) ListVMs() ([]*manager.VM, error) {
url := util.GenURL(fmt.Sprintf("%s%s/vms", c.cfg.Addr, api.APIPrefix))
data, err := c.get(url)
if err != nil {
- glog.Errorf("failed to get %s: %v", url, err)
+ klog.Errorf("failed to get %s: %v", url, err)
return nil, err
}
@@ -174,7 +174,7 @@ func (c *client) StartVM(vm *manager.VM) error {
url := util.GenURL(fmt.Sprintf("%s%s/vm/%s/start", c.cfg.Addr, api.APIPrefix, vmName))
if _, err := c.post(url, nil); err != nil {
- glog.Errorf("faled to post %s: %v", url, err)
+ klog.Errorf("faled to post %s: %v", url, err)
return err
}
@@ -190,7 +190,7 @@ func (c *client) StopVM(vm *manager.VM) error {
url := util.GenURL(fmt.Sprintf("%s%s/vm/%s/stop", c.cfg.Addr, api.APIPrefix, vmName))
if _, err := c.post(url, nil); err != nil {
- glog.Errorf("faled to post %s: %v", url, err)
+ klog.Errorf("faled to post %s: %v", url, err)
return err
}
@@ -240,7 +240,7 @@ func (c *client) StopKubeControllerManager() error {
func (c *client) startService(serviceName string) error {
url := util.GenURL(fmt.Sprintf("%s%s/%s/start", c.cfg.Addr, api.APIPrefix, serviceName))
if _, err := c.post(url, nil); err != nil {
- glog.Errorf("failed to post %s: %v", url, err)
+ klog.Errorf("failed to post %s: %v", url, err)
return err
}
@@ -250,7 +250,7 @@ func (c *client) startService(serviceName string) error {
func (c *client) stopService(serviceName string) error {
url := util.GenURL(fmt.Sprintf("%s%s/%s/stop", c.cfg.Addr, api.APIPrefix, serviceName))
if _, err := c.post(url, nil); err != nil {
- glog.Errorf("failed to post %s: %v", url, err)
+ klog.Errorf("failed to post %s: %v", url, err)
return err
}
diff --git a/tests/pkg/fault-trigger/manager/static_pod_service.go b/tests/pkg/fault-trigger/manager/static_pod_service.go
index e07df0e5c4..49d0a09cd7 100644
--- a/tests/pkg/fault-trigger/manager/static_pod_service.go
+++ b/tests/pkg/fault-trigger/manager/static_pod_service.go
@@ -18,7 +18,7 @@ import (
"os"
"os/exec"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -66,7 +66,7 @@ func (m *Manager) StopKubeControllerManager() error {
func (m *Manager) stopStaticPodService(serviceName string, fileName string) error {
manifest := fmt.Sprintf("%s/%s", staticPodPath, fileName)
if _, err := os.Stat(manifest); os.IsNotExist(err) {
- glog.Infof("%s had been stopped before", serviceName)
+ klog.Infof("%s had been stopped before", serviceName)
return nil
}
shell := fmt.Sprintf("mkdir -p %s && mv %s %s", staticPodTmpPath, manifest, staticPodTmpPath)
@@ -74,11 +74,11 @@ func (m *Manager) stopStaticPodService(serviceName string, fileName string) erro
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return err
}
- glog.Infof("%s is stopped", serviceName)
+ klog.Infof("%s is stopped", serviceName)
return nil
}
@@ -86,7 +86,7 @@ func (m *Manager) stopStaticPodService(serviceName string, fileName string) erro
func (m *Manager) startStaticPodService(serviceName string, fileName string) error {
manifest := fmt.Sprintf("%s/%s", staticPodTmpPath, fileName)
if _, err := os.Stat(manifest); os.IsNotExist(err) {
- glog.Infof("%s had been started before", serviceName)
+ klog.Infof("%s had been started before", serviceName)
return nil
}
shell := fmt.Sprintf("mv %s %s", manifest, staticPodPath)
@@ -94,11 +94,11 @@ func (m *Manager) startStaticPodService(serviceName string, fileName string) err
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return err
}
- glog.Infof("%s is started", serviceName)
+ klog.Infof("%s is started", serviceName)
return nil
}
diff --git a/tests/pkg/fault-trigger/manager/systemctl_service.go b/tests/pkg/fault-trigger/manager/systemctl_service.go
index b38a046701..52b8f2122e 100644
--- a/tests/pkg/fault-trigger/manager/systemctl_service.go
+++ b/tests/pkg/fault-trigger/manager/systemctl_service.go
@@ -17,7 +17,7 @@ import (
"fmt"
"os/exec"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -50,11 +50,11 @@ func (m *Manager) systemctlStartService(serviceName string) error {
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return err
}
- glog.Infof("%s is started", serviceName)
+ klog.Infof("%s is started", serviceName)
return nil
}
@@ -64,11 +64,11 @@ func (m *Manager) systemctlStopService(serviceName string) error {
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return err
}
- glog.Infof("%s is stopped", serviceName)
+ klog.Infof("%s is stopped", serviceName)
return nil
}
diff --git a/tests/pkg/fault-trigger/manager/vm_qm.go b/tests/pkg/fault-trigger/manager/vm_qm.go
index 91411c89d7..c5f822b42c 100644
--- a/tests/pkg/fault-trigger/manager/vm_qm.go
+++ b/tests/pkg/fault-trigger/manager/vm_qm.go
@@ -18,7 +18,7 @@ import (
"os/exec"
"strings"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
type QMVMManager struct {
@@ -33,7 +33,7 @@ func (qm *QMVMManager) ListVMs() ([]*VM, error) {
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return nil, err
}
vms := qm.parserVMs(string(output))
@@ -45,11 +45,11 @@ func (qm *QMVMManager) StartVM(vm *VM) error {
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return err
}
- glog.Infof("virtual machine %s is started", vm.Name)
+ klog.Infof("virtual machine %s is started", vm.Name)
return nil
}
@@ -59,11 +59,11 @@ func (qm *QMVMManager) StopVM(vm *VM) error {
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return err
}
- glog.Infof("virtual machine %s is stopped", vm.Name)
+ klog.Infof("virtual machine %s is stopped", vm.Name)
return nil
}
diff --git a/tests/pkg/fault-trigger/manager/vm_virsh.go b/tests/pkg/fault-trigger/manager/vm_virsh.go
index 8272b4cba2..dd482373f4 100644
--- a/tests/pkg/fault-trigger/manager/vm_virsh.go
+++ b/tests/pkg/fault-trigger/manager/vm_virsh.go
@@ -18,7 +18,7 @@ import (
"os/exec"
"strings"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
type VirshVMManager struct {
@@ -34,7 +34,7 @@ func (m *VirshVMManager) ListVMs() ([]*VM, error) {
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return nil, err
}
vms := m.parserVMs(string(output))
@@ -47,11 +47,11 @@ func (m *VirshVMManager) StopVM(v *VM) error {
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return err
}
- glog.Infof("virtual machine %s is stopped", v.Name)
+ klog.Infof("virtual machine %s is stopped", v.Name)
return nil
}
@@ -62,11 +62,11 @@ func (m *VirshVMManager) StartVM(v *VM) error {
cmd := exec.Command("/bin/sh", "-c", shell)
output, err := cmd.CombinedOutput()
if err != nil {
- glog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
+ klog.Errorf("exec: [%s] failed, output: %s, error: %v", shell, string(output), err)
return err
}
- glog.Infof("virtual machine %s is started", v.Name)
+ klog.Infof("virtual machine %s is started", v.Name)
return nil
}
diff --git a/tests/pkg/fixture/fixture.go b/tests/pkg/fixture/fixture.go
index a8295fae81..2908369eb3 100644
--- a/tests/pkg/fixture/fixture.go
+++ b/tests/pkg/fixture/fixture.go
@@ -14,8 +14,12 @@
package fixture
import (
+ "fmt"
+
"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+ "github.com/pingcap/tidb-operator/pkg/label"
corev1 "k8s.io/api/core/v1"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
@@ -25,8 +29,8 @@ var (
BestEffort = corev1.ResourceRequirements{}
BurstbleSmall = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("200m"),
- corev1.ResourceMemory: resource.MustParse("200Mi"),
+ corev1.ResourceCPU: resource.MustParse("100m"),
+ corev1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1000m"),
@@ -35,14 +39,19 @@ var (
}
BurstbleMedium = corev1.ResourceRequirements{
Requests: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("200m"),
- corev1.ResourceMemory: resource.MustParse("200Mi"),
+ corev1.ResourceCPU: resource.MustParse("100m"),
+ corev1.ResourceMemory: resource.MustParse("100Mi"),
},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("2000m"),
corev1.ResourceMemory: resource.MustParse("4Gi"),
},
}
+ // hard-coded region and s3 bucket in our aws account for e2e testing
+ // TODO create s3 bucket in current region dynamically
+ AWSRegion = "us-west-2"
+ Bucket = "backup.e2e.us-west-2.tidbcloud.com"
+ S3Secret = "s3-secret"
)
func WithStorage(r corev1.ResourceRequirements, size string) corev1.ResourceRequirements {
@@ -182,6 +191,12 @@ func NewTidbMonitor(name, namespace string, tc *v1alpha1.TidbCluster, grafanaEna
Type: corev1.ServiceTypeClusterIP,
Annotations: map[string]string{},
},
+ Envs: map[string]string{
+ "A": "B",
+ "foo": "hello",
+ "bar": "query",
+ "some": "any",
+ },
}
}
if persist {
@@ -191,3 +206,169 @@ func NewTidbMonitor(name, namespace string, tc *v1alpha1.TidbCluster, grafanaEna
}
return monitor
}
+
+func GetBackupRole(tc *v1alpha1.TidbCluster, serviceAccountName string) *rbacv1beta1.Role {
+ return &rbacv1beta1.Role{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: serviceAccountName,
+ Namespace: tc.GetNamespace(),
+ Labels: map[string]string{label.ComponentLabelKey: serviceAccountName},
+ },
+ Rules: []rbacv1beta1.PolicyRule{
+ {
+ APIGroups: []string{""},
+ Resources: []string{"events"},
+ Verbs: []string{"*"},
+ },
+ {
+ APIGroups: []string{"pingcap.com"},
+ Resources: []string{"backups", "restores"},
+ Verbs: []string{"get", "watch", "list", "update"},
+ },
+ },
+ }
+}
+
+func GetBackupServiceAccount(tc *v1alpha1.TidbCluster, serviceAccountName string) *corev1.ServiceAccount {
+ return &corev1.ServiceAccount{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: serviceAccountName,
+ Namespace: tc.GetNamespace(),
+ },
+ }
+}
+
+func GetBackupRoleBing(tc *v1alpha1.TidbCluster, serviceAccountName string) *rbacv1beta1.RoleBinding {
+ return &rbacv1beta1.RoleBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: serviceAccountName,
+ Namespace: tc.GetNamespace(),
+ Labels: map[string]string{label.ComponentLabelKey: serviceAccountName},
+ },
+ Subjects: []rbacv1beta1.Subject{
+ {
+ Kind: rbacv1beta1.ServiceAccountKind,
+ Name: serviceAccountName,
+ },
+ },
+ RoleRef: rbacv1beta1.RoleRef{
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "Role",
+ Name: serviceAccountName,
+ },
+ }
+}
+
+func GetBackupSecret(tc *v1alpha1.TidbCluster, password string) *corev1.Secret {
+ return &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("%s-backup-secret", tc.GetName()),
+ Namespace: tc.GetNamespace(),
+ },
+ Data: map[string][]byte{
+ "password": []byte(password),
+ },
+ Type: corev1.SecretTypeOpaque,
+ }
+}
+
+func GetS3Secret(tc *v1alpha1.TidbCluster, accessKey, secretKey string) *corev1.Secret {
+ return &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: S3Secret,
+ Namespace: tc.GetNamespace(),
+ },
+ Data: map[string][]byte{
+ "access_key": []byte(accessKey),
+ "secret_key": []byte(secretKey),
+ },
+ Type: corev1.SecretTypeOpaque,
+ }
+}
+
+func GetBackupCRDWithBR(tc *v1alpha1.TidbCluster, backupFolder string) *v1alpha1.Backup {
+ sendCredToTikv := true
+ return &v1alpha1.Backup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("%s-backup", tc.GetName()),
+ Namespace: tc.GetNamespace(),
+ },
+ Spec: v1alpha1.BackupSpec{
+ Type: v1alpha1.BackupTypeFull,
+ StorageProvider: v1alpha1.StorageProvider{
+ S3: &v1alpha1.S3StorageProvider{
+ Provider: v1alpha1.S3StorageProviderTypeAWS,
+ Region: AWSRegion,
+ Bucket: Bucket,
+ Prefix: backupFolder,
+ SecretName: S3Secret,
+ },
+ },
+ From: v1alpha1.TiDBAccessConfig{
+ Host: fmt.Sprintf("%s-tidb.%s", tc.GetName(), tc.GetNamespace()),
+ SecretName: fmt.Sprintf("%s-backup-secret", tc.GetName()),
+ Port: 4000,
+ User: "root",
+ },
+ BR: &v1alpha1.BRConfig{
+ Cluster: tc.GetName(),
+ ClusterNamespace: tc.GetNamespace(),
+ SendCredToTikv: &sendCredToTikv,
+ },
+ },
+ }
+}
+
+func GetRestoreCRDWithBR(tc *v1alpha1.TidbCluster, backupFolder string) *v1alpha1.Restore {
+ sendCredToTikv := true
+ return &v1alpha1.Restore{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("%s-restore", tc.GetName()),
+ Namespace: tc.GetNamespace(),
+ },
+ Spec: v1alpha1.RestoreSpec{
+ Type: v1alpha1.BackupTypeFull,
+ StorageProvider: v1alpha1.StorageProvider{
+ S3: &v1alpha1.S3StorageProvider{
+ Provider: v1alpha1.S3StorageProviderTypeAWS,
+ Region: AWSRegion,
+ Bucket: Bucket,
+ Prefix: backupFolder,
+ SecretName: S3Secret,
+ },
+ },
+ To: v1alpha1.TiDBAccessConfig{
+ Host: fmt.Sprintf("%s-tidb.%s", tc.GetName(), tc.GetNamespace()),
+ SecretName: fmt.Sprintf("%s-backup-secret", tc.GetName()),
+ Port: 4000,
+ User: "root",
+ },
+ BR: &v1alpha1.BRConfig{
+ Cluster: tc.GetName(),
+ ClusterNamespace: tc.GetNamespace(),
+ SendCredToTikv: &sendCredToTikv,
+ },
+ },
+ }
+}
+
+func GetTidbClusterAutoScaler(name, ns string, tc *v1alpha1.TidbCluster, tm *v1alpha1.TidbMonitor) *v1alpha1.TidbClusterAutoScaler {
+ return &v1alpha1.TidbClusterAutoScaler{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ },
+ Spec: v1alpha1.TidbClusterAutoScalerSpec{
+ Cluster: v1alpha1.TidbClusterRef{
+ Name: tc.Name,
+ Namespace: tc.Namespace,
+ },
+ Monitor: &v1alpha1.TidbMonitorRef{
+ Name: tm.Name,
+ Namespace: tm.Namespace,
+ },
+ TiKV: nil,
+ TiDB: nil,
+ },
+ }
+}
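A hedged sketch of how an e2e case might wire the new backup fixtures together with the typed clientsets already used in these tests; the function name, service account name, password, and backup folder below are assumptions (not part of this patch), and the context-free Create(obj) signatures follow the client-go generation this repo is pinned to:

	import (
		"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
		"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
		"github.com/pingcap/tidb-operator/tests/pkg/fixture"
		"k8s.io/client-go/kubernetes"
	)

	// createBackupFixtures submits the RBAC objects and credentials first,
	// then the Backup CR that references them.
	func createBackupFixtures(kubeCli kubernetes.Interface, cli versioned.Interface, tc *v1alpha1.TidbCluster) error {
		ns := tc.GetNamespace()
		sa := "tidb-backup-manager" // hypothetical service account name
		if _, err := kubeCli.CoreV1().ServiceAccounts(ns).Create(fixture.GetBackupServiceAccount(tc, sa)); err != nil {
			return err
		}
		if _, err := kubeCli.RbacV1beta1().Roles(ns).Create(fixture.GetBackupRole(tc, sa)); err != nil {
			return err
		}
		if _, err := kubeCli.RbacV1beta1().RoleBindings(ns).Create(fixture.GetBackupRoleBing(tc, sa)); err != nil {
			return err
		}
		if _, err := kubeCli.CoreV1().Secrets(ns).Create(fixture.GetBackupSecret(tc, "password")); err != nil {
			return err
		}
		if _, err := kubeCli.CoreV1().Secrets(ns).Create(fixture.GetS3Secret(tc, "accessKey", "secretKey")); err != nil {
			return err
		}
		_, err := cli.PingcapV1alpha1().Backups(ns).Create(fixture.GetBackupCRDWithBR(tc, "backup-folder"))
		return err
	}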
diff --git a/tests/pkg/ops/exec.go b/tests/pkg/ops/exec.go
index 297feafd30..896503a065 100644
--- a/tests/pkg/ops/exec.go
+++ b/tests/pkg/ops/exec.go
@@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// ExecOptions passed to ExecWithOptions
@@ -47,7 +47,7 @@ type ExecOptions struct {
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
func (cli *ClientOps) ExecWithOptions(options ExecOptions) (string, string, error) {
- glog.Infof("ExecWithOptions %+v", options)
+ klog.Infof("ExecWithOptions %+v", options)
config, err := client.LoadConfig()
if err != nil {
diff --git a/tests/pkg/ops/tikv.go b/tests/pkg/ops/tikv.go
index 43ed9a7c78..de7922b6ad 100644
--- a/tests/pkg/ops/tikv.go
+++ b/tests/pkg/ops/tikv.go
@@ -22,7 +22,7 @@ import (
"github.com/pingcap/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -70,7 +70,7 @@ func (ops *TiKVOps) TruncateSSTFile(opts TruncateOptions) error {
}
stdout, stderr, err := exec("find", "/var/lib/tikv/db", "-name", "*.sst", "-o", "-name", "*.save")
if err != nil {
- glog.Warningf(logHdr+"list sst files: stderr=%s err=%s", stderr, err.Error())
+ klog.Warningf(logHdr+"list sst files: stderr=%s err=%s", stderr, err.Error())
continue
}
@@ -93,7 +93,7 @@ func (ops *TiKVOps) TruncateSSTFile(opts TruncateOptions) error {
}
}
if len(ssts) == 0 {
- glog.Warning(logHdr + "cannot find a sst file")
+ klog.Warning(logHdr + "cannot find a sst file")
continue
}
@@ -102,17 +102,17 @@ func (ops *TiKVOps) TruncateSSTFile(opts TruncateOptions) error {
_, stderr, err = exec("sh", "-c",
fmt.Sprintf("cp %s %s.save && truncate -s 0 %s", sst, sst, sst))
if err != nil {
- glog.Warningf(logHdr+"truncate sst file: sst=%s stderr=%s err=%s", sst, stderr, err.Error())
+ klog.Warningf(logHdr+"truncate sst file: sst=%s stderr=%s err=%s", sst, stderr, err.Error())
continue
}
truncated++
}
if truncated == 0 {
- glog.Warningf(logHdr + "no sst file has been truncated")
+ klog.Warningf(logHdr + "no sst file has been truncated")
continue
}
- glog.Infof(logHdr+"%d sst files got truncated", truncated)
+ klog.Infof(logHdr+"%d sst files got truncated", truncated)
break
}
@@ -125,14 +125,14 @@ func (ops *TiKVOps) TruncateSSTFile(opts TruncateOptions) error {
func (ops *TiKVOps) RecoverSSTFile(ns, podName string) error {
annotateCmd := fmt.Sprintf("kubectl annotate pod %s -n %s runmode=debug --overwrite", podName, ns)
- glog.Info(annotateCmd)
+ klog.Info(annotateCmd)
res, err := exec.Command("/bin/sh", "-c", annotateCmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to annotation pod: %s/%s, %v, %s", ns, podName, err, string(res))
}
findCmd := fmt.Sprintf("kubectl exec -n %s %s -- find /var/lib/tikv/db -name '*.sst.save'", ns, podName)
- glog.Info(findCmd)
+ klog.Info(findCmd)
findData, err := exec.Command("/bin/sh", "-c", findCmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to find .save files: %s/%s, %v, %s", ns, podName, err, string(findData))
@@ -145,7 +145,7 @@ func (ops *TiKVOps) RecoverSSTFile(ns, podName string) error {
}
sstFile := strings.TrimSuffix(saveFile, ".save")
mvCmd := fmt.Sprintf("kubectl exec -n %s %s -- mv %s %s", ns, podName, saveFile, sstFile)
- glog.Info(mvCmd)
+ klog.Info(mvCmd)
res, err := exec.Command("/bin/sh", "-c", mvCmd).CombinedOutput()
if err != nil {
return fmt.Errorf("failed to recovery .sst files: %s/%s, %s, %s, %v, %s",
diff --git a/tests/pkg/util/db.go b/tests/pkg/util/db.go
index 4f94d6d14a..ace69cdbe1 100644
--- a/tests/pkg/util/db.go
+++ b/tests/pkg/util/db.go
@@ -18,7 +18,7 @@ import (
"fmt"
"strings"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// OpenDB opens db
@@ -29,7 +29,7 @@ func OpenDB(dsn string, maxIdleConns int) (*sql.DB, error) {
}
db.SetMaxIdleConns(maxIdleConns)
- glog.V(4).Info("DB opens successfully")
+ klog.V(4).Info("DB opens successfully")
return db, nil
}
diff --git a/tests/pkg/util/utils.go b/tests/pkg/util/utils.go
index 8f18244e03..87e8e8741b 100644
--- a/tests/pkg/util/utils.go
+++ b/tests/pkg/util/utils.go
@@ -19,7 +19,7 @@ import (
"os/exec"
"strings"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
const (
@@ -70,7 +70,7 @@ func ListK8sNodes(kubectlPath, labels string) ([]string, error) {
if len(nodes) == 0 {
return nil, fmt.Errorf("get k8s nodes is empty")
}
- glog.Infof("get k8s nodes success: %s, labels: %s", nodes, labels)
+ klog.Infof("get k8s nodes success: %s, labels: %s", nodes, labels)
return nodes, nil
}
diff --git a/tests/pkg/webhook/pods.go b/tests/pkg/webhook/pods.go
index 799c5e8c87..057b0d35ec 100644
--- a/tests/pkg/webhook/pods.go
+++ b/tests/pkg/webhook/pods.go
@@ -27,18 +27,18 @@ import (
"github.com/pingcap/tidb-operator/tests/pkg/client"
"k8s.io/api/admission/v1beta1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// only allow pods to be delete when it is not ddlowner of tidb, not leader of pd and not
// master of tikv.
func (wh *webhook) admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse {
- glog.V(4).Infof("admitting pods")
+ klog.V(4).Infof("admitting pods")
podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
if ar.Request.Resource != podResource {
err := fmt.Errorf("expect resource to be %s", podResource)
- glog.Errorf("%v", err)
+ klog.Errorf("%v", err)
return toAdmissionResponse(err)
}
@@ -51,22 +51,22 @@ func (wh *webhook) admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionRespo
reviewResponse.Allowed = false
if !wh.namespaces.Has(namespace) {
- glog.V(4).Infof("%q is not in our namespaces %v, skip", namespace, wh.namespaces.List())
+ klog.V(4).Infof("%q is not in our namespaces %v, skip", namespace, wh.namespaces.List())
reviewResponse.Allowed = true
return &reviewResponse
}
pod, err := kubeCli.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
if err != nil {
- glog.Infof("api server send wrong pod info namespace %s name %s err %v", namespace, name, err)
+ klog.Infof("api server send wrong pod info namespace %s name %s err %v", namespace, name, err)
return &reviewResponse
}
- glog.V(4).Infof("delete %s pod [%s]", pod.Labels[label.ComponentLabelKey], pod.GetName())
+ klog.V(4).Infof("delete %s pod [%s]", pod.Labels[label.ComponentLabelKey], pod.GetName())
tc, err := versionCli.PingcapV1alpha1().TidbClusters(namespace).Get(pod.Labels[label.InstanceLabelKey], metav1.GetOptions{})
if err != nil {
- glog.Infof("fail to fetch tidbcluster info namespace %s clustername(instance) %s err %v", namespace, pod.Labels[label.InstanceLabelKey], err)
+ klog.Infof("fail to fetch tidbcluster info namespace %s clustername(instance) %s err %v", namespace, pod.Labels[label.InstanceLabelKey], err)
return &reviewResponse
}
@@ -74,7 +74,7 @@ func (wh *webhook) admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionRespo
// if pod is already deleting, return Allowed
if pod.DeletionTimestamp != nil {
- glog.V(4).Infof("pod:[%s/%s] status is timestamp %s", namespace, name, pod.DeletionTimestamp)
+ klog.V(4).Infof("pod:[%s/%s] status is timestamp %s", namespace, name, pod.DeletionTimestamp)
reviewResponse.Allowed = true
return &reviewResponse
}
@@ -83,22 +83,22 @@ func (wh *webhook) admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionRespo
leader, err := pdClient.GetPDLeader()
if err != nil {
- glog.Errorf("fail to get pd leader %v", err)
+ klog.Errorf("fail to get pd leader %v", err)
return &reviewResponse
}
if leader.Name == name && tc.Status.PD.StatefulSet.Replicas > 1 {
time.Sleep(10 * time.Second)
err := fmt.Errorf("pd is leader, can't be deleted namespace %s name %s", namespace, name)
- glog.Error(err)
+ klog.Error(err)
sendErr := slack.SendErrMsg(err.Error())
if sendErr != nil {
- glog.Error(sendErr)
+ klog.Error(sendErr)
}
// TODO use context instead
os.Exit(3)
}
- glog.Infof("savely delete pod namespace %s name %s leader name %s", namespace, name, leader.Name)
+ klog.Infof("savely delete pod namespace %s name %s leader name %s", namespace, name, leader.Name)
}
reviewResponse.Allowed = true
return &reviewResponse
diff --git a/tests/pkg/webhook/route.go b/tests/pkg/webhook/route.go
index 22b32aecdc..32ab3ef332 100644
--- a/tests/pkg/webhook/route.go
+++ b/tests/pkg/webhook/route.go
@@ -21,7 +21,7 @@ import (
"k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
// toAdmissionResponse is a helper function to create an AdmissionResponse
@@ -83,10 +83,10 @@ func serve(w http.ResponseWriter, r *http.Request, admit admitFunc) {
returnData:
respBytes, err := json.Marshal(responseAdmissionReview)
if err != nil {
- glog.Error(err)
+ klog.Error(err)
}
if _, err := w.Write(respBytes); err != nil {
- glog.Error(err)
+ klog.Error(err)
}
}
diff --git a/tests/slack/slack.go b/tests/slack/slack.go
index 6c0d948abb..fc465834cb 100644
--- a/tests/slack/slack.go
+++ b/tests/slack/slack.go
@@ -20,7 +20,7 @@ import (
"net/http"
"time"
- glog "k8s.io/klog"
+ "k8s.io/klog"
)
var (
@@ -163,7 +163,7 @@ func SendWarnMsg(msg string) error {
func NotifyAndPanic(err error) {
sendErr := SendErrMsg(fmt.Sprintf("Succeed %d times, then failed: %s", SuccessCount, err.Error()))
if sendErr != nil {
- glog.Warningf("failed to notify slack[%s] the massage: %v,error: %v", WebhookURL, err, sendErr)
+ klog.Warningf("failed to notify slack[%s] the massage: %v,error: %v", WebhookURL, err, sendErr)
}
time.Sleep(3 * time.Second)
panic(err)
@@ -173,7 +173,7 @@ func NotifyAndCompletedf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
sendErr := SendGoodMsg(msg)
if sendErr != nil {
- glog.Warningf("failed to notify slack[%s] the massage: %s,error: %v", WebhookURL, msg, sendErr)
+ klog.Warningf("failed to notify slack[%s] the massage: %s,error: %v", WebhookURL, msg, sendErr)
}
- glog.Infof(msg)
+ klog.Info(msg)
}
diff --git a/tests/util.go b/tests/util.go
index d024a568fd..aab774ca02 100644
--- a/tests/util.go
+++ b/tests/util.go
@@ -17,17 +17,18 @@ import (
"bytes"
"fmt"
"math/rand"
+ "os/exec"
"text/template"
"time"
"github.com/pingcap/tidb-operator/tests/slack"
-
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
+ "k8s.io/klog"
)
// Keep will keep the fun running in the period, otherwise the fun return error
@@ -299,3 +300,21 @@ func waitForComponentStatus(c kubernetes.Interface, component string, statusType
func IntPtr(i int) *int {
return &i
}
+
+func DeployReleasedCRDOrDie(version string) {
+ cmd := fmt.Sprintf(`kubectl apply -f https://raw.githubusercontent.com/pingcap/tidb-operator/%s/manifests/crd.yaml`, version)
+ klog.Info(cmd)
+ res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
+ if err != nil {
+ klog.Fatalf(fmt.Sprintf("failed to deploy crd: %v, %s", err, string(res)))
+ }
+}
+
+func CleanReleasedCRDOrDie(version string) {
+ cmd := fmt.Sprintf(`kubectl delete -f https://raw.githubusercontent.com/pingcap/tidb-operator/%s/manifests/crd.yaml`, version)
+ klog.Info(cmd)
+ res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
+ if err != nil {
+ klog.Fatalf(fmt.Sprintf("failed to clean crd: %v, %s", err, string(res)))
+ }
+}
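A brief usage sketch for the two new helpers (not part of this patch); the version string is an assumption, e.g. an upgrade-style test could install a released version's CRDs before deploying that operator and remove them afterwards:

	// install the CRDs shipped with a released operator version, clean up when done
	tests.DeployReleasedCRDOrDie("v1.0.6")
	defer tests.CleanReleasedCRDOrDie("v1.0.6")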
diff --git a/tests/webhook.go b/tests/webhook.go
index 1aec2b67a4..1e2b0ae1a2 100644
--- a/tests/webhook.go
+++ b/tests/webhook.go
@@ -14,6 +14,7 @@
package tests
import (
+ "encoding/base64"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -41,7 +42,7 @@ func (oa *operatorActions) setCabundleFromApiServer(info *OperatorConfig) error
if !existed {
return fmt.Errorf("failed to get caBundle from configmap[%s/%s]", namespace, name)
}
- info.Cabundle = content
+ info.Cabundle = base64.StdEncoding.EncodeToString([]byte(content))
return nil
}
return nil
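The change above encodes the CA bundle before storing it in the operator config, presumably because the value is later substituted into a webhook configuration manifest whose caBundle field expects base64-encoded PEM rather than raw PEM. A minimal illustration with placeholder names:

	import "encoding/base64"

	// content holds the raw PEM CA bundle read from the
	// extension-apiserver-authentication configmap.
	caBundle := base64.StdEncoding.EncodeToString([]byte(content))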