From 7da4bd2923b5f49e8dd0b4267ae3ba938c9919da Mon Sep 17 00:00:00 2001 From: Tennix Date: Fri, 7 Dec 2018 17:38:01 +0800 Subject: [PATCH] bump TiDB to v2.1.0 (#212) * bump TiDB to v2.1.0 --- .../templates/config/_drainer-config.tpl | 31 ++++++-- .../templates/config/_pd-config.tpl | 5 ++ .../config/_privileged-tidb-config.tpl | 36 +++++---- .../templates/config/_pump-config.tpl | 16 +++- .../templates/config/_tidb-config.tpl | 36 +++++---- .../templates/config/_tikv-config.tpl | 73 ++++++++++++------- charts/tidb-cluster/values.yaml | 12 +-- .../tidb-cluster-values.yaml | 12 +-- tests/e2e/upgrade.go | 4 +- 9 files changed, 152 insertions(+), 73 deletions(-) diff --git a/charts/tidb-cluster/templates/config/_drainer-config.tpl b/charts/tidb-cluster/templates/config/_drainer-config.tpl index c3395fb89a4..addc1d1f3f7 100644 --- a/charts/tidb-cluster/templates/config/_drainer-config.tpl +++ b/charts/tidb-cluster/templates/config/_drainer-config.tpl @@ -25,7 +25,7 @@ pd-urls = "http://{{ .Values.clusterName }}-pd:2379" [syncer] # disable sync these schema -ignore-schemas = "{{ .Values.binlog.drainer.ignoreSchemas | default "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql,test" }}" +ignore-schemas = {{ .Values.binlog.drainer.ignoreSchemas | default "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql" | quote }} # number of binlog events in a transaction batch txn-batch = {{ .Values.binlog.drainer.txnBatch | default 1 }} @@ -54,6 +54,20 @@ db-type = "{{ .Values.binlog.drainer.destDBType }}" #db-name ="test" #tbl-name = "~^a.*" +{{- if eq .Values.binlog.drainer.destDBType "mysql" }} +# the downstream mysql protocol database +[syncer.to] +host = {{ .Values.binlog.drainer.mysql.host | quote }} +user = {{ .Values.binlog.drainer.mysql.user | default "root" | quote }} +password = {{ .Values.binlog.drainer.mysql.password | quote }} +port = {{ .Values.binlog.drainer.mysql.port | default 3306 }} +# Time and size limits for flash batch write +time-limit = {{ .Values.binlog.drainer.mysql.timeLimit | default "30s" | quote }} +size-limit = {{ .Values.binlog.drainer.mysql.sizeLimit | default 100000 | quote }} +[syncer.to.checkpoint] +#schema = "tidb_binlog" +{{- end }} + {{- if eq .Values.binlog.drainer.destDBType "pb" }} # Uncomment this if you want to use pb or sql as db-type. # Compress compresses output file, like pb and sql file. Now it supports "gzip" algorithm only. @@ -63,16 +77,21 @@ dir = "/data/pb" compression = "gzip" {{- end }} - {{- if eq .Values.binlog.drainer.destDBType "kafka" }} # when db-type is kafka, you can uncomment this to config the down stream kafka, it will be the globle config kafka default [syncer.to] # only need config one of zookeeper-addrs and kafka-addrs, will get kafka address if zookeeper-addrs is configed. 
{{- if .Values.binlog.drainer.kafka.zookeeperAddrs }} -zookeeper-addrs = {{ .Values.binlog.drainer.kafka.zookeeperAddrs }} +zookeeper-addrs = {{ .Values.binlog.drainer.kafka.zookeeperAddrs | quote }} {{- end }} {{- if .Values.binlog.drainer.kafka.kafkaAddrs }} -kafka-addrs = {{ .Values.binlog.drainer.kafka.kafkaAddrs }} -{{- end }} -kafka-version = {{ .Values.binlog.drainer.kafka.kafkaVersion | default "0.8.2.0" }} +kafka-addrs = {{ .Values.binlog.drainer.kafka.kafkaAddrs | quote }} {{- end }} +kafka-version = {{ .Values.binlog.drainer.kafka.kafkaVersion | default "0.8.2.0" | quote }} +kafka-max-messages = 1024 +# +# +# the topic name drainer will push msg, the default name is _obinlog +# be careful don't use the same name if run multi drainer instances +# topic-name = "" +{{- end -}} diff --git a/charts/tidb-cluster/templates/config/_pd-config.tpl b/charts/tidb-cluster/templates/config/_pd-config.tpl index d1aa3ddf847..daea702054c 100644 --- a/charts/tidb-cluster/templates/config/_pd-config.tpl +++ b/charts/tidb-cluster/templates/config/_pd-config.tpl @@ -17,6 +17,10 @@ initial-cluster-state = "" lease = 3 tso-save-interval = "3s" +namespace-classifier = "table" + +enable-prevote = true + [security] # Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty cacert-path = "" @@ -54,6 +58,7 @@ address = "" [schedule] max-merge-region-size = 0 +max-merge-region-keys = 0 split-merge-interval = "1h" max-snapshot-count = 3 max-pending-peer-count = 16 diff --git a/charts/tidb-cluster/templates/config/_privileged-tidb-config.tpl b/charts/tidb-cluster/templates/config/_privileged-tidb-config.tpl index 00a5fd96a10..b6558971756 100644 --- a/charts/tidb-cluster/templates/config/_privileged-tidb-config.tpl +++ b/charts/tidb-cluster/templates/config/_privileged-tidb-config.tpl @@ -3,6 +3,9 @@ # TiDB server host. host = "0.0.0.0" +# tidb server advertise IP. +advertise-address = "" + # TiDB server port. port = 4000 @@ -41,9 +44,13 @@ enable-streaming = false # Set system variable 'lower_case_table_names' lower-case-table-names = 2 +# Make "kill query" behavior compatible with MySQL. It's not recommend to +# turn on this option when TiDB server is behind a proxy. +compatible-kill-query = false + [log] # Log level: debug, info, warn, error, fatal. -level = "{{ .Values.privilegedTidb.logLevel }}" +level = {{ .Values.privilegedTidb.logLevel | default "info" | quote }} # Log format, one of json, text, console. format = "text" @@ -123,9 +130,6 @@ stmt-count-limit = 5000 # Set keep alive option for tcp connection. tcp-keep-alive = true -# The maximum number of retries when commit a transaction. -retry-limit = 10 - # Whether support cartesian product. cross-join = true @@ -136,14 +140,18 @@ stats-lease = "3s" run-auto-analyze = true # Probability to use the query feedback to update stats, 0 or 1 for always false/true. -feedback-probability = 0.0 +feedback-probability = 0.05 # The max number of query feedback that cache in memory. query-feedback-limit = 1024 # Pseudo stats will be used if the ratio between the modify count and # row count in statistics of a table is greater than it. -pseudo-estimate-ratio = 0.7 +pseudo-estimate-ratio = 0.8 + +# Force the priority of all statements in a specified priority. +# The value could be "NO_PRIORITY", "LOW_PRIORITY", "HIGH_PRIORITY" or "DELAYED". +force-priority = "NO_PRIORITY" [proxy-protocol] # PROXY protocol acceptable client networks. 
@@ -153,11 +161,6 @@ networks = "" # PROXY protocol header read timeout, unit is second header-timeout = 5 -[plan-cache] -enabled = false -capacity = 2560 -shards = 256 - [prepared-plan-cache] enabled = false capacity = 100 @@ -228,10 +231,15 @@ grpc-keepalive-timeout = 3 # max time for commit command, must be twice bigger than raft election timeout. commit-timeout = "41s" -[binlog] +[txn-local-latches] +# Enable local latches for transactions. Enable it when +# there are lots of conflicts between transactions. +enabled = false +capacity = 10240000 -# Socket file to write binlog. -binlog-socket = "" +[binlog] +# enable to write binlog. +enable = false # WriteTimeout specifies how long it will wait for writing binlog to pump. write-timeout = "15s" diff --git a/charts/tidb-cluster/templates/config/_pump-config.tpl b/charts/tidb-cluster/templates/config/_pump-config.tpl index e8ed3a2426d..3d6cfa2cbd0 100644 --- a/charts/tidb-cluster/templates/config/_pump-config.tpl +++ b/charts/tidb-cluster/templates/config/_pump-config.tpl @@ -4,7 +4,7 @@ addr = "0.0.0.0:8250" # addr(i.e. 'host:port') to advertise to the public -# advertise-addr = "" +advertise-addr = "" # a integer value to control expiry date of the binlog data, indicates for how long (in days) the binlog data would be stored. # must bigger than 0 @@ -26,3 +26,17 @@ pd-urls = "http://{{ .Values.clusterName }}-pd:2379" # ssl-cert = "/path/to/drainer.pem" # Path of file that contains X509 key in PEM format for connection with cluster components. # ssl-key = "/path/to/drainer-key.pem" +# +# +# we suggest using the default config of the embedded LSM DB now, do not change it unless you know what you are doing +# [storage.kv] +# block-cache-capacity = 8388608 +# block-restart-interval = 16 +# block-size = 4096 +# compaction-L0-trigger = 8 +# compaction-table-size = 67108864 +# compaction-total-size = 536870912 +# compaction-total-size-multiplier = 8 +# write-buffer = 67108864 +# write-L0-pause-trigger = 24 +# write-L0-slowdown-trigger = 17 diff --git a/charts/tidb-cluster/templates/config/_tidb-config.tpl b/charts/tidb-cluster/templates/config/_tidb-config.tpl index 77d2de35d1a..56525ec41f1 100644 --- a/charts/tidb-cluster/templates/config/_tidb-config.tpl +++ b/charts/tidb-cluster/templates/config/_tidb-config.tpl @@ -1,8 +1,11 @@ -#TiDB Configuration. +# TiDB Configuration. # TiDB server host. host = "0.0.0.0" +# tidb server advertise IP. +advertise-address = "" + # TiDB server port. port = 4000 @@ -41,6 +44,10 @@ enable-streaming = false # Set system variable 'lower_case_table_names' lower-case-table-names = 2 +# Make "kill query" behavior compatible with MySQL. It's not recommend to +# turn on this option when TiDB server is behind a proxy. +compatible-kill-query = false + [log] # Log level: debug, info, warn, error, fatal. level = {{ .Values.tidb.logLevel | default "info" | quote }} @@ -121,9 +128,6 @@ stmt-count-limit = 5000 # Set keep alive option for tcp connection. tcp-keep-alive = true -# The maximum number of retries when commit a transaction. -retry-limit = 10 - # Whether support cartesian product. cross-join = true @@ -134,14 +138,18 @@ stats-lease = "3s" run-auto-analyze = true # Probability to use the query feedback to update stats, 0 or 1 for always false/true. -feedback-probability = 0.0 +feedback-probability = 0.05 # The max number of query feedback that cache in memory. query-feedback-limit = 1024 # Pseudo stats will be used if the ratio between the modify count and # row count in statistics of a table is greater than it. 
-pseudo-estimate-ratio = 0.7 +pseudo-estimate-ratio = 0.8 + +# Force the priority of all statements in a specified priority. +# The value could be "NO_PRIORITY", "LOW_PRIORITY", "HIGH_PRIORITY" or "DELAYED". +force-priority = "NO_PRIORITY" [proxy-protocol] # PROXY protocol acceptable client networks. @@ -151,11 +159,6 @@ networks = "" # PROXY protocol header read timeout, unit is second header-timeout = 5 -[plan-cache] -enabled = false -capacity = 2560 -shards = 256 - [prepared-plan-cache] enabled = false capacity = 100 @@ -226,10 +229,15 @@ grpc-keepalive-timeout = 3 # max time for commit command, must be twice bigger than raft election timeout. commit-timeout = "41s" -[binlog] +[txn-local-latches] +# Enable local latches for transactions. Enable it when +# there are lots of conflicts between transactions. +enabled = false +capacity = 10240000 -# Socket file to write binlog. -binlog-socket = "" +[binlog] +# enable to write binlog. +enable = false # WriteTimeout specifies how long it will wait for writing binlog to pump. write-timeout = "15s" diff --git a/charts/tidb-cluster/templates/config/_tikv-config.tpl b/charts/tidb-cluster/templates/config/_tikv-config.tpl index 3b399604bc1..a2336bd11cb 100644 --- a/charts/tidb-cluster/templates/config/_tikv-config.tpl +++ b/charts/tidb-cluster/templates/config/_tikv-config.tpl @@ -5,11 +5,18 @@ # Time(based on ms): ms, s, m, h # e.g.: 78_000 = "1.3m" -# log level: trace, debug, info, warn, error, off. +# log level: trace, debug, info, warning, error, critical. +# Note that `debug` and `trace` are only available in development builds. log-level = {{ .Values.tikv.logLevel | default "info" | quote }} + # file to store log, write to stderr if it's empty. # log-file = "" +# timespan between rotating the log files. +# Once this timespan passes the existing log file will have a timestamp appended to its name, +# and a new file will be created. +# log-rotation-timespan = "24h" + [readpool.storage] # size of thread pool for high-priority operations # high-concurrency = 4 @@ -17,12 +24,12 @@ log-level = {{ .Values.tikv.logLevel | default "info" | quote }} # normal-concurrency = 4 # size of thread pool for low-priority operations # low-concurrency = 4 -# max running high-priority operations, reject if exceed -# max-tasks-high = 8000 -# max running normal-priority operations, reject if exceed -# max-tasks-normal = 8000 -# max running low-priority operations, reject if exceed -# max-tasks-low = 8000 +# max running high-priority operations of each worker, reject if exceed +# max-tasks-per-worker-high = 2000 +# max running normal-priority operations of each worker, reject if exceed +# max-tasks-per-worker-normal = 2000 +# max running low-priority operations of each worker, reject if exceed +# max-tasks-per-worker-low = 2000 # size of stack size for each thread pool # stack-size = "10MB" @@ -33,9 +40,9 @@ log-level = {{ .Values.tikv.logLevel | default "info" | quote }} # high-concurrency = 8 # normal-concurrency = 8 # low-concurrency = 8 -# max-tasks-high = 16000 -# max-tasks-normal = 16000 -# max-tasks-low = 16000 +# max-tasks-per-worker-high = 2000 +# max-tasks-per-worker-normal = 2000 +# max-tasks-per-worker-low = 2000 # stack-size = "10MB" [server] @@ -48,8 +55,8 @@ log-level = {{ .Values.tikv.logLevel | default "info" | quote }} # maximum number of messages can be processed in one tick. # messages-per-tick = 4096 -# compression type for grpc channel, available values are no, deflate and gzip. 
-# grpc-compression-type = "no" +# compression type for grpc channel, available values are none, deflate and gzip. +# grpc-compression-type = "none" # size of thread pool for grpc server. # grpc-concurrency = 4 # The number of max concurrent streams/requests on a client connection. @@ -58,15 +65,18 @@ log-level = {{ .Values.tikv.logLevel | default "info" | quote }} # grpc-raft-conn-num = 10 # Amount to read ahead on individual grpc streams. # grpc-stream-initial-window-size = "2MB" +# Time to wait before sending out a ping to check if server is still alive. +# This is only for communications between tikv instances. +# grpc-keepalive-time = "10s" +# Time to wait before closing the connection without receiving keepalive ping +# ack. +# grpc-keepalive-timeout = "3s" # How many snapshots can be sent concurrently. # concurrent-send-snap-limit = 32 # How many snapshots can be recv concurrently. # concurrent-recv-snap-limit = 32 -# max count of tasks being handled, new tasks will be rejected. -# end-point-max-tasks = 2000 - # max recursion level allowed when decoding dag expression # end-point-recursion-limit = 1000 @@ -117,6 +127,9 @@ address = "http://localhost:9091" # empty or http://localhost:9091 to disable or # true (default value) for high reliability, this can prevent data loss when power failure. sync-log = {{ .Values.tikv.syncLog }} +# minimizes disruption when a partitioned node rejoins the cluster by using a two phase election. +# prevote = true + # set the path to raftdb directory, default value is data-dir/raft # raftdb-path = "" @@ -160,14 +173,24 @@ sync-log = {{ .Values.tikv.syncLog }} # Interval to check whether start manual compaction for a region, # region-compact-check-interval = "5m" + # Number of regions for each time to check. # region-compact-check-step = 100 + # The minimum number of delete tombstones to trigger manual compaction. # region-compact-min-tombstones = 10000 + +# The minimum percentage of delete tombstones to trigger manual compaction. +# Should between 1 and 100. Manual compaction only triggered when the number +# of delete tombstones exceeds region-compact-min-tombstones and the percentage +# of delete tombstones exceeds region-compact-tombstones-percent. +# region-compact-tombstones-percent = 30 + # Interval to check whether should start a manual compaction for lock column family, # if written bytes reach lock-cf-compact-threshold for lock column family, will fire # a manual compaction for lock column family. # lock-cf-compact-interval = "10m" + # lock-cf-compact-bytes-threshold = "256MB" # Interval (s) to check region whether the data are consistent. @@ -192,6 +215,11 @@ sync-log = {{ .Values.tikv.syncLog }} # bit smaller. # region-max-size = "144MB" # region-split-size = "96MB" +# When the region's keys exceeds region-max-keys, we will split the region +# into two which the left region's keys will be region-split-keys or a little +# bit smaller. +# region-max-keys = 1440000 +# region-split-keys = 960000 [rocksdb] # Maximum number of concurrent background jobs (compactions and flushes) @@ -285,25 +313,23 @@ sync-log = {{ .Values.tikv.syncLog }} # Allows OS to incrementally sync files to disk while they are being # written, asynchronously, in the background. -# bytes-per-sync = "0MB" +# bytes-per-sync = "1MB" # Allows OS to incrementally sync WAL to disk while it is being written. -# wal-bytes-per-sync = "0KB" +# wal-bytes-per-sync = "512KB" # Specify the maximal size of the Rocksdb info log file. 
If the log file # is larger than `max_log_file_size`, a new info log file will be created. # If max_log_file_size == 0, all logs will be written to one log file. -# Default: 1GB # info-log-max-size = "1GB" # Time for the Rocksdb info log file to roll (in seconds). # If specified with non-zero value, log file will be rolled # if it has been active longer than `log_file_time_to_roll`. -# Default: 0 (disabled) +# 0 means disabled. # info-log-roll-time = "0" # Maximal Rocksdb info log files to be kept. -# Default: 10 # info-log-keep-log-file-num = 10 # This specifies the Rocksdb info LOG dir. @@ -311,7 +337,6 @@ sync-log = {{ .Values.tikv.syncLog }} # If it is non empty, the log files will be in the specified dir, # and the db data dir's absolute path will be used as the log file # name's prefix. -# Default: empty # info-log-dir = "" # Column Family default used to store actual data of the database. @@ -454,8 +479,8 @@ sync-log = {{ .Values.tikv.syncLog }} # use-direct-io-for-flush-and-compaction = false # enable-pipelined-write = true # allow-concurrent-memtable-write = false -# bytes-per-sync = "0MB" -# wal-bytes-per-sync = "0KB" +# bytes-per-sync = "1MB" +# wal-bytes-per-sync = "512KB" # info-log-max-size = "1GB" # info-log-roll-time = "0" @@ -489,8 +514,6 @@ sync-log = {{ .Values.tikv.syncLog }} # key-path = "" [import] -# the directory to store importing kv data. -# import-dir = "/tmp/tikv/import" # number of threads to handle RPC requests. # num-threads = 8 # stream channel window size, stream will be blocked on channel full. diff --git a/charts/tidb-cluster/values.yaml b/charts/tidb-cluster/values.yaml index 9c2c588eaac..fbc23768f98 100644 --- a/charts/tidb-cluster/values.yaml +++ b/charts/tidb-cluster/values.yaml @@ -28,7 +28,7 @@ services: pd: replicas: 3 - image: pingcap/pd:v2.0.7 + image: pingcap/pd:v2.1.0 logLevel: info # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. # different classes might map to quality-of-service levels, or to backup policies, @@ -70,7 +70,7 @@ pd: tikv: replicas: 3 - image: pingcap/tikv:v2.0.7 + image: pingcap/tikv:v2.1.0 logLevel: info # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. # different classes might map to quality-of-service levels, or to backup policies, @@ -118,7 +118,7 @@ tidb: replicas: 2 # password is TiDB's password, if omit password, a random password is generated # password: "admin" - image: pingcap/tidb:v2.0.7 + image: pingcap/tidb:v2.1.0 # Image pull policy. imagePullPolicy: IfNotPresent logLevel: info @@ -209,7 +209,7 @@ monitor: fullbackup: create: false - binlogImage: pingcap/tidb-binlog:latest + binlogImage: pingcap/tidb-binlog:v2.1.0 binlogImagePullPolicy: IfNotPresent # https://github.com/tennix/tidb-cloud-backup mydumperImage: pingcap/tidb-cloud-backup:latest @@ -246,7 +246,7 @@ binlog: pump: create: false replicas: 1 - image: pingcap/tidb-binlog:new + image: pingcap/tidb-binlog:v2.1.0 imagePullPolicy: IfNotPresent logLevel: info # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. @@ -263,7 +263,7 @@ binlog: drainer: create: false - image: pingcap/tidb-binlog:new + image: pingcap/tidb-binlog:v2.1.0 imagePullPolicy: IfNotPresent logLevel: info # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. 
diff --git a/images/tidb-operator-e2e/tidb-cluster-values.yaml b/images/tidb-operator-e2e/tidb-cluster-values.yaml index 46232afe633..5905d907e63 100644 --- a/images/tidb-operator-e2e/tidb-cluster-values.yaml +++ b/images/tidb-operator-e2e/tidb-cluster-values.yaml @@ -28,7 +28,7 @@ services: pd: replicas: 3 - image: pingcap/pd:v2.0.7 + image: pingcap/pd:v2.1.0 imagePullPolicy: IfNotPresent logLevel: info # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. @@ -68,7 +68,7 @@ pd: tikv: replicas: 3 - image: pingcap/tikv:v2.0.7 + image: pingcap/tikv:v2.1.0 imagePullPolicy: IfNotPresent logLevel: info # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. @@ -114,7 +114,7 @@ tidb: replicas: 2 # password is TiDB's password, if omit password, a random password is generated password: "admin" - image: pingcap/tidb:v2.0.7 + image: pingcap/tidb:v2.1.0 imagePullPolicy: IfNotPresent logLevel: info resources: {} @@ -200,7 +200,7 @@ monitor: fullbackup: create: false - binlogImage: pingcap/tidb-binlog:latest + binlogImage: pingcap/tidb-binlog:v2.1.0 binlogImagePullPolicy: IfNotPresent # https://github.com/tennix/tidb-cloud-backup mydumperImage: pingcap/tidb-cloud-backup:latest @@ -237,7 +237,7 @@ binlog: pump: create: false replicas: 1 - image: pingcap/tidb-binlog:new + image: pingcap/tidb-binlog:v2.1.0 imagePullPolicy: IfNotPresent logLevel: info # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. @@ -254,7 +254,7 @@ binlog: drainer: create: false - image: pingcap/tidb-binlog:new + image: pingcap/tidb-binlog:v2.1.0 imagePullPolicy: IfNotPresent logLevel: info # storageClassName is a StorageClass provides a way for administrators to describe the "classes" of storage they offer. diff --git a/tests/e2e/upgrade.go b/tests/e2e/upgrade.go index 7cf8c5f22fd..cd0abf2cd0e 100644 --- a/tests/e2e/upgrade.go +++ b/tests/e2e/upgrade.go @@ -28,7 +28,9 @@ import ( ) const ( - upgradeVersion = "v2.0.8" + // TODO: the base version is also v2.1.0 now, so no upgrade right now + // change to later version when TiDB released + upgradeVersion = "v2.1.0" ) func testUpgrade(ns, clusterName string) {
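
Note: the _drainer-config.tpl change above adds a MySQL downstream section that is rendered only when binlog.drainer.destDBType is "mysql". Below is a minimal values.yaml sketch for exercising that branch; the binlog.drainer.mysql.* key names are taken from the template references in this patch (the values.yaml hunks above do not show defaults for them), and the host and password are placeholders rather than chart defaults.

binlog:
  pump:
    create: true
    image: pingcap/tidb-binlog:v2.1.0
  drainer:
    create: true
    image: pingcap/tidb-binlog:v2.1.0
    destDBType: mysql
    # ignoreSchemas and txnBatch fall back to the template defaults if omitted
    ignoreSchemas: "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
    txnBatch: 1
    mysql:
      host: downstream-mysql.example.com   # placeholder, no chart default
      user: root
      password: changeme                   # placeholder, no chart default
      port: 3306
      timeLimit: 30s
      sizeLimit: 100000

With these values the template renders a [syncer.to] block containing host, user, password, port, time-limit and size-limit, followed by the commented-out [syncer.to.checkpoint] schema setting.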
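Similarly, the Kafka branch of the drainer template now quotes zookeeper-addrs, kafka-addrs and kafka-version, and pins kafka-max-messages to 1024. A sketch of the corresponding values follows (key names again taken from the template references in this patch; the addresses are placeholders):

binlog:
  drainer:
    create: true
    destDBType: kafka
    kafka:
      # set only one of zookeeperAddrs and kafkaAddrs; Kafka addresses are
      # discovered from ZooKeeper when zookeeperAddrs is configured
      zookeeperAddrs: "zk-0.zk:2181"        # placeholder
      # kafkaAddrs: "kafka-0.kafka:9092"    # placeholder
      kafkaVersion: "0.8.2.0"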