diff --git a/Makefile b/Makefile index bb955f19271..7c31cb7d684 100644 --- a/Makefile +++ b/Makefile @@ -92,7 +92,9 @@ ifneq ($(DASHBOARD_DISTRIBUTION_DIR),) endif PD_SERVER_DEP += dashboard-ui -pd-server: ${PD_SERVER_DEP} +pre-build: ${PD_SERVER_DEP} + +pd-server: pre-build GOEXPERIMENT=$(BUILD_GOEXPERIMENT) CGO_ENABLED=$(BUILD_CGO_ENABLED) go build $(BUILD_FLAGS) -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -tags "$(BUILD_TAGS)" -o $(BUILD_BIN_PATH)/pd-server cmd/pd-server/main.go pd-server-failpoint: @@ -103,7 +105,7 @@ pd-server-failpoint: pd-server-basic: SWAGGER=0 DASHBOARD=0 $(MAKE) pd-server -.PHONY: build tools pd-server pd-server-basic +.PHONY: pre-build build tools pd-server pd-server-basic # Tools @@ -172,9 +174,9 @@ install-tools: #### Static checks #### -check: install-tools tidy static generate-errdoc +check: tidy static generate-errdoc -static: install-tools +static: install-tools pre-build @ echo "gofmt ..." @ gofmt -s -l -d $(PACKAGE_DIRECTORIES) 2>&1 | awk '{ print } END { if (NR > 0) { exit 1 } }' @ echo "golangci-lint ..." 
@@ -240,7 +242,7 @@ basic-test: install-tools ci-test-job: install-tools dashboard-ui @$(FAILPOINT_ENABLE) - ./scripts/ci-subtask.sh $(JOB_COUNT) $(JOB_INDEX) + ./scripts/ci-subtask.sh $(JOB_COUNT) $(JOB_INDEX) || { $(FAILPOINT_DISABLE); exit 1; } @$(FAILPOINT_DISABLE) TSO_INTEGRATION_TEST_PKGS := $(PD_PKG)/tests/server/tso diff --git a/client/client.go b/client/client.go index 800f97ed82c..604adb3f578 100644 --- a/client/client.go +++ b/client/client.go @@ -381,7 +381,7 @@ func createClientWithKeyspace( ctx: clientCtx, cancel: clientCancel, keyspaceID: keyspaceID, - svrUrls: addrsToUrls(svrAddrs), + svrUrls: svrAddrs, tlsCfg: tlsCfg, option: newOption(), } @@ -396,6 +396,9 @@ func createClientWithKeyspace( nil, keyspaceID, c.svrUrls, c.tlsCfg, c.option) if err := c.setup(); err != nil { c.cancel() + if c.pdSvcDiscovery != nil { + c.pdSvcDiscovery.Close() + } return nil, err } @@ -497,7 +500,7 @@ func newClientWithKeyspaceName( updateTokenConnectionCh: make(chan struct{}, 1), ctx: clientCtx, cancel: clientCancel, - svrUrls: addrsToUrls(svrAddrs), + svrUrls: svrAddrs, tlsCfg: tlsCfg, option: newOption(), } @@ -522,6 +525,9 @@ func newClientWithKeyspaceName( clientCtx, clientCancel, &c.wg, c.setServiceMode, updateKeyspaceIDCb, nullKeyspaceID, c.svrUrls, c.tlsCfg, c.option) if err := c.setup(); err != nil { c.cancel() + if c.pdSvcDiscovery != nil { + c.pdSvcDiscovery.Close() + } return nil, err } log.Info("[pd] create pd client with endpoints and keyspace", @@ -1382,19 +1388,6 @@ func (c *client) scatterRegionsWithOptions(ctx context.Context, regionsID []uint return resp, nil } -func addrsToUrls(addrs []string) []string { - // Add default schema "http://" to addrs. - urls := make([]string, 0, len(addrs)) - for _, addr := range addrs { - if strings.Contains(addr, "://") { - urls = append(urls, addr) - } else { - urls = append(urls, "http://"+addr) - } - } - return urls -} - // IsLeaderChange will determine whether there is a leader change. 
func IsLeaderChange(err error) bool { if err == errs.ErrClientTSOStreamClosed { diff --git a/client/go.mod b/client/go.mod index eb49eb674d8..6a9d29a3184 100644 --- a/client/go.mod +++ b/client/go.mod @@ -12,7 +12,7 @@ require ( github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 github.com/pingcap/kvproto v0.0.0-20231222062942-c0c73f41d0b2 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 - github.com/prometheus/client_golang v1.11.1 + github.com/prometheus/client_golang v1.18.0 github.com/stretchr/testify v1.8.2 go.uber.org/atomic v1.10.0 go.uber.org/goleak v1.1.11 @@ -28,18 +28,17 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.26.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.46.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/sys v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/client/go.sum b/client/go.sum index c72e50e79ee..a58d351ebcf 100644 --- a/client/go.sum +++ b/client/go.sum @@ -1,21 +1,12 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudfoundry/gosigar v1.3.6 h1:gIc08FbB3QPb+nAQhINIK/qhf5REKkY0FTGgRGXkcVc= @@ 
-25,61 +16,22 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ 
-98,39 +50,25 @@ github.com/pingcap/kvproto v0.0.0-20231222062942-c0c73f41d0b2 h1:364A6VCS+l0oHBK github.com/pingcap/kvproto v0.0.0-20231222062942-c0c73f41d0b2/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common 
v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -156,7 +94,6 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -167,47 +104,28 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= @@ -226,24 +144,16 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc/examples v0.0.0-20231221225426-4f03f3ff32c9 h1:ATnmU8nL2NfIyTSiBvJVDIDIr3qBmeW+c7z7XU21eWs= google.golang.org/grpc/examples v0.0.0-20231221225426-4f03f3ff32c9/go.mod h1:j5uROIAAgi3YmtiETMt1LW0d/lHqQ7wwrIY4uGRXLQ4= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -254,13 +164,11 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/client/http/client.go b/client/http/client.go index f16a3abed89..cb02d16d0ef 100644 --- a/client/http/client.go +++ b/client/http/client.go @@ -21,7 +21,6 @@ import ( "encoding/json" "io" "net/http" - "os" "time" "github.com/pingcap/errors" @@ -67,6 +66,8 @@ type clientInner struct { requestCounter *prometheus.CounterVec executionDuration *prometheus.HistogramVec + // defaultSD indicates whether the client is created with the default service discovery. + defaultSD bool } func newClientInner(ctx context.Context, cancel context.CancelFunc, source string) *clientInner { @@ -91,6 +92,10 @@ func (ci *clientInner) close() { if ci.cli != nil { ci.cli.CloseIdleConnections() } + // only close the service discovery if it's created by the client. + if ci.defaultSD && ci.sd != nil { + ci.sd.Close() + } } func (ci *clientInner) reqCounter(name, status string) { @@ -270,21 +275,6 @@ func WithMetrics( } } -// WithLoggerRedirection configures the client with the given logger redirection. 
-func WithLoggerRedirection(logLevel, fileName string) ClientOption { - cfg := &log.Config{} - cfg.Level = logLevel - if fileName != "" { - f, _ := os.CreateTemp(".", fileName) - fname := f.Name() - f.Close() - cfg.File.Filename = fname - } - lg, p, _ := log.InitLogger(cfg) - log.ReplaceGlobals(lg, p) - return func(c *client) {} -} - // NewClientWithServiceDiscovery creates a PD HTTP client with the given PD service discovery. func NewClientWithServiceDiscovery( source string, @@ -314,7 +304,12 @@ func NewClient( opt(c) } sd := pd.NewDefaultPDServiceDiscovery(ctx, cancel, pdAddrs, c.inner.tlsConf) + if err := sd.Init(); err != nil { + log.Error("[pd] init service discovery failed", zap.String("source", source), zap.Strings("pd-addrs", pdAddrs), zap.Error(err)) + return nil + } c.inner.init(sd) + c.inner.defaultSD = true return c } @@ -371,6 +366,7 @@ func (c *client) request(ctx context.Context, reqInfo *requestInfo, headerOpts . headerOpts...) } +/* The following functions are only for test */ // requestChecker is used to check the HTTP request sent by the client. type requestChecker func(req *http.Request) error @@ -385,3 +381,21 @@ func NewHTTPClientWithRequestChecker(checker requestChecker) *http.Client { Transport: checker, } } + +// newClientWithoutInitServiceDiscovery creates a PD HTTP client +// with the given PD addresses and TLS config without init service discovery. +func newClientWithoutInitServiceDiscovery( + source string, + pdAddrs []string, + opts ...ClientOption, +) Client { + ctx, cancel := context.WithCancel(context.Background()) + c := &client{inner: newClientInner(ctx, cancel, source), callerID: defaultCallerID} + // Apply the options first. 
+ for _, opt := range opts { + opt(c) + } + sd := pd.NewDefaultPDServiceDiscovery(ctx, cancel, pdAddrs, c.inner.tlsConf) + c.inner.init(sd) + return c +} diff --git a/client/http/client_test.go b/client/http/client_test.go index b9fcb5a75e0..02fce93838e 100644 --- a/client/http/client_test.go +++ b/client/http/client_test.go @@ -40,7 +40,7 @@ func TestPDAllowFollowerHandleHeader(t *testing.T) { } return nil }) - c := NewClient("test-header", []string{"http://127.0.0.1"}, WithHTTPClient(httpClient)) + c := newClientWithoutInitServiceDiscovery("test-header", []string{"http://127.0.0.1"}, WithHTTPClient(httpClient)) c.GetRegions(context.Background()) c.GetHistoryHotRegions(context.Background(), &HistoryHotRegionsRequest{}) c.Close() @@ -58,7 +58,7 @@ func TestCallerID(t *testing.T) { } return nil }) - c := NewClient("test-caller-id", []string{"http://127.0.0.1"}, WithHTTPClient(httpClient)) + c := newClientWithoutInitServiceDiscovery("test-caller-id", []string{"http://127.0.0.1"}, WithHTTPClient(httpClient)) c.GetRegions(context.Background()) expectedVal.Store("test") c.WithCallerID(expectedVal.Load()).GetRegions(context.Background()) @@ -69,7 +69,7 @@ func TestWithBackoffer(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c := NewClient("test-with-backoffer", []string{"http://127.0.0.1"}) + c := newClientWithoutInitServiceDiscovery("test-with-backoffer", []string{"http://127.0.0.1"}) base := 100 * time.Millisecond max := 500 * time.Millisecond diff --git a/client/pd_service_discovery.go b/client/pd_service_discovery.go index 28bb8bbd661..0425f4d537c 100644 --- a/client/pd_service_discovery.go +++ b/client/pd_service_discovery.go @@ -480,18 +480,7 @@ func NewDefaultPDServiceDiscovery( urls []string, tlsCfg *tls.Config, ) *pdServiceDiscovery { var wg sync.WaitGroup - pdsd := &pdServiceDiscovery{ - checkMembershipCh: make(chan struct{}, 1), - ctx: ctx, - cancel: cancel, - wg: &wg, - apiCandidateNodes: 
[apiKindCount]*pdServiceBalancer{newPDServiceBalancer(emptyErrorFn), newPDServiceBalancer(regionAPIErrorFn)}, - keyspaceID: defaultKeyspaceID, - tlsCfg: tlsCfg, - option: newOption(), - } - pdsd.urls.Store(urls) - return pdsd + return newPDServiceDiscovery(ctx, cancel, &wg, nil, nil, defaultKeyspaceID, urls, tlsCfg, newOption()) } // newPDServiceDiscovery returns a new PD service discovery-based client. @@ -515,6 +504,7 @@ func newPDServiceDiscovery( tlsCfg: tlsCfg, option: option, } + urls = addrsToUrls(urls) pdsd.urls.Store(urls) return pdsd } @@ -911,7 +901,9 @@ func (c *pdServiceDiscovery) checkServiceModeChanged() error { if clusterInfo == nil || len(clusterInfo.ServiceModes) == 0 { return errors.WithStack(errNoServiceModeReturned) } - c.serviceModeUpdateCb(clusterInfo.ServiceModes[0]) + if c.serviceModeUpdateCb != nil { + c.serviceModeUpdateCb(clusterInfo.ServiceModes[0]) + } return nil } @@ -1153,3 +1145,16 @@ func (c *pdServiceDiscovery) switchTSOAllocatorLeaders(allocatorMap map[string]* func (c *pdServiceDiscovery) GetOrCreateGRPCConn(addr string) (*grpc.ClientConn, error) { return grpcutil.GetOrCreateGRPCConn(c.ctx, &c.clientConns, addr, c.tlsCfg, c.option.gRPCDialOptions...) } + +func addrsToUrls(addrs []string) []string { + // Add default schema "http://" to addrs. 
+ urls := make([]string, 0, len(addrs)) + for _, addr := range addrs { + if strings.Contains(addr, "://") { + urls = append(urls, addr) + } else { + urls = append(urls, "http://"+addr) + } + } + return urls +} diff --git a/client/pd_service_discovery_test.go b/client/pd_service_discovery_test.go index 1dc73af1f5f..d97f117790f 100644 --- a/client/pd_service_discovery_test.go +++ b/client/pd_service_discovery_test.go @@ -158,6 +158,8 @@ func (suite *serviceClientTestSuite) TearDownTest() { func (suite *serviceClientTestSuite) TearDownSuite() { suite.leaderServer.grpcServer.GracefulStop() suite.followerServer.grpcServer.GracefulStop() + suite.leaderClient.GetClientConn().Close() + suite.followerClient.GetClientConn().Close() suite.clean() } diff --git a/client/testutil/leak.go b/client/testutil/leak.go index ec2a6543941..28b5baae60f 100644 --- a/client/testutil/leak.go +++ b/client/testutil/leak.go @@ -23,9 +23,4 @@ var LeakOptions = []goleak.Option{ goleak.IgnoreTopFunction("google.golang.org/grpc.(*addrConn).createTransport"), goleak.IgnoreTopFunction("google.golang.org/grpc.(*addrConn).resetTransport"), goleak.IgnoreTopFunction("google.golang.org/grpc.(*Server).handleRawConn"), - // TODO: remove the below options once we fixed the http connection leak problems - goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"), - goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*controlBuffer).get"), - goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*http2Server).keepalive"), - goleak.IgnoreTopFunction("google.golang.org/grpc/internal/grpcsync.(*CallbackSerializer).run"), } diff --git a/go.mod b/go.mod index dd7a4caaa1c..e438228a728 100644 --- a/go.mod +++ b/go.mod @@ -35,8 +35,8 @@ require ( github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 github.com/pingcap/tidb-dashboard v0.0.0-20240111062855-41f7c8011953 - github.com/prometheus/client_golang v1.11.1 - 
github.com/prometheus/common v0.26.0 + github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/common v0.46.0 github.com/sasha-s/go-deadlock v0.2.0 github.com/shirou/gopsutil/v3 v3.23.3 github.com/smallnest/chanx v0.0.0-20221229104322-eb4c998d2072 @@ -135,7 +135,6 @@ require ( github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-runewidth v0.0.8 // indirect github.com/mattn/go-sqlite3 v1.14.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 // indirect github.com/minio/sio v0.3.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -150,8 +149,8 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.0.1 // indirect github.com/samber/lo v1.37.0 // indirect @@ -179,20 +178,20 @@ require ( go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.17.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/image v0.10.0 // indirect golang.org/x/mod v0.13.0 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.4.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/term v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto 
v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 1fa5a657ac2..0e308d173a0 100644 --- a/go.sum +++ b/go.sum @@ -21,10 +21,7 @@ github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502 h1:L github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502/go.mod h1:pmnBM9bxWSiHvC/gSWunUIyDvGn33EkP2CUjxFKtTTM= github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= @@ -81,7 +78,6 @@ github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4ea github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -163,11 +159,8 @@ github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -233,13 +226,6 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -250,10 +236,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -335,13 +318,10 @@ github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV 
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -354,8 +334,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text 
v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -389,7 +370,6 @@ github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuuj github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= @@ -477,34 +457,31 @@ github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:Om github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= @@ -674,8 +651,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= 
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 h1:QLureRX3moex6NVu/Lr4MGakp9FdA7sBHGBmvRW7NaM= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= @@ -709,10 +686,8 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -723,20 +698,18 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.18.0 
h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -755,22 
+728,17 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -785,13 +753,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -862,17 +830,11 @@ 
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -897,9 +859,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/election/lease.go b/pkg/election/lease.go index eada4f8786d..a6b49fb99f8 100644 --- a/pkg/election/lease.go +++ b/pkg/election/lease.go @@ -135,7 +135,7 @@ func (l *lease) KeepAlive(ctx context.Context) { // https://pkg.go.dev/time@master#Timer.Reset timer.Reset(l.leaseTimeout) case <-timer.C: - log.Info("lease timeout", zap.Time("expire", l.expireTime.Load().(time.Time)), zap.String("purpose", l.Purpose)) + log.Info("keep alive lease too slow", zap.Duration("timeout-duration", l.leaseTimeout), zap.Time("actual-expire", l.expireTime.Load().(time.Time)), zap.String("purpose", l.Purpose)) return case <-ctx.Done(): return @@ -154,11 +154,14 @@ func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-c log.Info("start lease keep alive worker", zap.Duration("interval", interval), zap.String("purpose", l.Purpose)) defer log.Info("stop lease keep alive worker", zap.String("purpose", l.Purpose)) - + lastTime := time.Now() for { - go func() { + start := time.Now() + if start.Sub(lastTime) > interval*2 { + log.Warn("the interval between keeping alive lease is too long", zap.Time("last-time", lastTime)) + } + go func(start time.Time) { defer logutil.LogPanic() - start := 
time.Now() ctx1, cancel := context.WithTimeout(ctx, l.leaseTimeout) defer cancel() var leaseID clientv3.LeaseID @@ -180,12 +183,13 @@ func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-c } else { log.Error("keep alive response ttl is zero", zap.String("purpose", l.Purpose)) } - }() + }(start) select { case <-ctx.Done(): return case <-ticker.C: + lastTime = start } } }() diff --git a/pkg/keyspace/tso_keyspace_group.go b/pkg/keyspace/tso_keyspace_group.go index 55c9adf66d9..5ed9747e923 100644 --- a/pkg/keyspace/tso_keyspace_group.go +++ b/pkg/keyspace/tso_keyspace_group.go @@ -216,7 +216,6 @@ func (m *GroupManager) allocNodesToAllKeyspaceGroups(ctx context.Context) { func (m *GroupManager) initTSONodesWatcher(client *clientv3.Client, clusterID uint64) { tsoServiceKey := discovery.TSOPath(clusterID) - tsoServiceEndKey := clientv3.GetPrefixRangeEnd(tsoServiceKey) putFn := func(kv *mvccpb.KeyValue) error { s := &discovery.ServiceRegistryEntry{} @@ -249,7 +248,7 @@ func (m *GroupManager) initTSONodesWatcher(client *clientv3.Client, clusterID ui putFn, deleteFn, func([]*clientv3.Event) error { return nil }, - clientv3.WithRange(tsoServiceEndKey), + true, /* withPrefix */ ) } diff --git a/pkg/keyspace/util.go b/pkg/keyspace/util.go index aa8d0f350ea..e3586ee35d4 100644 --- a/pkg/keyspace/util.go +++ b/pkg/keyspace/util.go @@ -175,8 +175,8 @@ func MakeRegionBound(id uint32) *RegionBound { } } -// makeKeyRanges encodes keyspace ID to correct LabelRule data. -func makeKeyRanges(id uint32) []interface{} { +// MakeKeyRanges encodes keyspace ID to correct LabelRule data. 
+func MakeKeyRanges(id uint32) []interface{} { regionBound := MakeRegionBound(id) return []interface{}{ map[string]interface{}{ @@ -207,7 +207,7 @@ func MakeLabelRule(id uint32) *labeler.LabelRule { }, }, RuleType: labeler.KeyRange, - Data: makeKeyRanges(id), + Data: MakeKeyRanges(id), } } diff --git a/pkg/mcs/scheduling/server/apis/v1/api.go b/pkg/mcs/scheduling/server/apis/v1/api.go index 8c4ed015a5c..8b48fde611e 100644 --- a/pkg/mcs/scheduling/server/apis/v1/api.go +++ b/pkg/mcs/scheduling/server/apis/v1/api.go @@ -324,7 +324,8 @@ func getOperatorByRegion(c *gin.Context) { // @Tags operators // @Summary List operators. -// @Param kind query string false "Specify the operator kind." Enums(admin, leader, region, waiting) +// @Param kind query string false "Specify the operator kind." Enums(admin, leader, region, waiting) +// @Param object query bool false "Whether to return as JSON object." // @Produce json // @Success 200 {array} operator.Operator // @Failure 500 {string} string "PD server failed to proceed the request." 
@@ -337,6 +338,7 @@ func getOperators(c *gin.Context) { ) kinds := c.QueryArray("kind") + _, objectFlag := c.GetQuery("object") if len(kinds) == 0 { results, err = handler.GetOperators() } else { @@ -347,7 +349,15 @@ func getOperators(c *gin.Context) { c.String(http.StatusInternalServerError, err.Error()) return } - c.IndentedJSON(http.StatusOK, results) + if objectFlag { + objResults := make([]*operator.OpObject, len(results)) + for i, op := range results { + objResults[i] = op.ToJSONObject() + } + c.IndentedJSON(http.StatusOK, objResults) + } else { + c.IndentedJSON(http.StatusOK, results) + } } // @Tags operator diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index bbecad51f7c..9752a9160f8 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -7,6 +7,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/kvproto/pkg/schedulingpb" "github.com/pingcap/log" @@ -454,7 +455,11 @@ func (c *Cluster) runCoordinator() { defer logutil.LogPanic() defer c.wg.Done() // force wait for 1 minute to make prepare checker won't be directly skipped - c.coordinator.RunUntilStop(collectWaitTime) + runCollectWaitTime := collectWaitTime + failpoint.Inject("changeRunCollectWaitTime", func() { + runCollectWaitTime = 1 * time.Second + }) + c.coordinator.RunUntilStop(runCollectWaitTime) } func (c *Cluster) runMetricsCollectionJob() { diff --git a/pkg/mcs/scheduling/server/config/watcher.go b/pkg/mcs/scheduling/server/config/watcher.go index 32028592504..8db5e656279 100644 --- a/pkg/mcs/scheduling/server/config/watcher.go +++ b/pkg/mcs/scheduling/server/config/watcher.go @@ -146,6 +146,7 @@ func (cw *Watcher) initializeConfigWatcher() error { func([]*clientv3.Event) error { return nil }, putFn, deleteFn, func([]*clientv3.Event) error { return nil }, + false, /* withPrefix */ ) cw.configWatcher.StartWatchLoop() return 
cw.configWatcher.WaitLoad() @@ -176,7 +177,7 @@ func (cw *Watcher) initializeTTLConfigWatcher() error { func([]*clientv3.Event) error { return nil }, putFn, deleteFn, func([]*clientv3.Event) error { return nil }, - clientv3.WithPrefix(), + true, /* withPrefix */ ) cw.ttlConfigWatcher.StartWatchLoop() return cw.ttlConfigWatcher.WaitLoad() @@ -217,7 +218,7 @@ func (cw *Watcher) initializeSchedulerConfigWatcher() error { func([]*clientv3.Event) error { return nil }, putFn, deleteFn, func([]*clientv3.Event) error { return nil }, - clientv3.WithPrefix(), + true, /* withPrefix */ ) cw.schedulerConfigWatcher.StartWatchLoop() return cw.schedulerConfigWatcher.WaitLoad() diff --git a/pkg/mcs/scheduling/server/meta/watcher.go b/pkg/mcs/scheduling/server/meta/watcher.go index 808e8fc565e..925b28763b5 100644 --- a/pkg/mcs/scheduling/server/meta/watcher.go +++ b/pkg/mcs/scheduling/server/meta/watcher.go @@ -111,7 +111,7 @@ func (w *Watcher) initializeStoreWatcher() error { func([]*clientv3.Event) error { return nil }, putFn, deleteFn, func([]*clientv3.Event) error { return nil }, - clientv3.WithPrefix(), + true, /* withPrefix */ ) w.storeWatcher.StartWatchLoop() return w.storeWatcher.WaitLoad() diff --git a/pkg/mcs/scheduling/server/rule/watcher.go b/pkg/mcs/scheduling/server/rule/watcher.go index 3e11cf9ff9d..d8a8dd3e609 100644 --- a/pkg/mcs/scheduling/server/rule/watcher.go +++ b/pkg/mcs/scheduling/server/rule/watcher.go @@ -110,7 +110,7 @@ func (rw *Watcher) initializeRuleWatcher() error { var suspectKeyRanges *core.KeyRanges preEventsFn := func(events []*clientv3.Event) error { - // It will be locked until the postFn is finished. + // It will be locked until the postEventsFn is finished. 
rw.ruleManager.Lock() rw.patch = rw.ruleManager.BeginPatch() suspectKeyRanges = &core.KeyRanges{} @@ -188,7 +188,7 @@ func (rw *Watcher) initializeRuleWatcher() error { } postEventsFn := func(events []*clientv3.Event) error { defer rw.ruleManager.Unlock() - if err := rw.ruleManager.TryCommitPatch(rw.patch); err != nil { + if err := rw.ruleManager.TryCommitPatchLocked(rw.patch); err != nil { log.Error("failed to commit patch", zap.Error(err)) return err } @@ -204,7 +204,7 @@ func (rw *Watcher) initializeRuleWatcher() error { preEventsFn, putFn, deleteFn, postEventsFn, - clientv3.WithPrefix(), + true, /* withPrefix */ ) rw.ruleWatcher.StartWatchLoop() return rw.ruleWatcher.WaitLoad() @@ -212,27 +212,38 @@ func (rw *Watcher) initializeRuleWatcher() error { func (rw *Watcher) initializeRegionLabelWatcher() error { prefixToTrim := rw.regionLabelPathPrefix + "/" + // TODO: use txn in region labeler. + preEventsFn := func(events []*clientv3.Event) error { + // It will be locked until the postEventsFn is finished. 
+ rw.regionLabeler.Lock() + return nil + } putFn := func(kv *mvccpb.KeyValue) error { - log.Info("update region label rule", zap.String("key", string(kv.Key)), zap.String("value", string(kv.Value))) + log.Debug("update region label rule", zap.String("key", string(kv.Key)), zap.String("value", string(kv.Value))) rule, err := labeler.NewLabelRuleFromJSON(kv.Value) if err != nil { return err } - return rw.regionLabeler.SetLabelRule(rule) + return rw.regionLabeler.SetLabelRuleLocked(rule) } deleteFn := func(kv *mvccpb.KeyValue) error { key := string(kv.Key) log.Info("delete region label rule", zap.String("key", key)) - return rw.regionLabeler.DeleteLabelRule(strings.TrimPrefix(key, prefixToTrim)) + return rw.regionLabeler.DeleteLabelRuleLocked(strings.TrimPrefix(key, prefixToTrim)) + } + postEventsFn := func(events []*clientv3.Event) error { + defer rw.regionLabeler.Unlock() + rw.regionLabeler.BuildRangeListLocked() + return nil } rw.labelWatcher = etcdutil.NewLoopWatcher( rw.ctx, &rw.wg, rw.etcdClient, "scheduling-region-label-watcher", rw.regionLabelPathPrefix, - func([]*clientv3.Event) error { return nil }, + preEventsFn, putFn, deleteFn, - func([]*clientv3.Event) error { return nil }, - clientv3.WithPrefix(), + postEventsFn, + true, /* withPrefix */ ) rw.labelWatcher.StartWatchLoop() return rw.labelWatcher.WaitLoad() diff --git a/pkg/mcs/scheduling/server/rule/watcher_test.go b/pkg/mcs/scheduling/server/rule/watcher_test.go new file mode 100644 index 00000000000..dafc7dcac2f --- /dev/null +++ b/pkg/mcs/scheduling/server/rule/watcher_test.go @@ -0,0 +1,113 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rule + +import ( + "context" + "encoding/json" + "os" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tikv/pd/pkg/keyspace" + "github.com/tikv/pd/pkg/schedule/labeler" + "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/storage/kv" + "github.com/tikv/pd/pkg/utils/etcdutil" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/embed" +) + +const ( + clusterID = uint64(20240117) + rulesNum = 16384 +) + +func TestLoadLargeRules(t *testing.T) { + re := require.New(t) + ctx, client, clean := prepare(t) + defer clean() + runWatcherLoadLabelRule(ctx, re, client) +} + +func BenchmarkLoadLargeRules(b *testing.B) { + re := require.New(b) + ctx, client, clean := prepare(b) + defer clean() + + b.ResetTimer() // Resets the timer to ignore initialization time in the benchmark + + for n := 0; n < b.N; n++ { + runWatcherLoadLabelRule(ctx, re, client) + } +} + +func runWatcherLoadLabelRule(ctx context.Context, re *require.Assertions, client *clientv3.Client) { + storage := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil) + labelerManager, err := labeler.NewRegionLabeler(ctx, storage, time.Hour) + re.NoError(err) + ctx, cancel := context.WithCancel(ctx) + rw := &Watcher{ + ctx: ctx, + cancel: cancel, + rulesPathPrefix: endpoint.RulesPathPrefix(clusterID), + ruleCommonPathPrefix: endpoint.RuleCommonPathPrefix(clusterID), + ruleGroupPathPrefix: endpoint.RuleGroupPathPrefix(clusterID), + regionLabelPathPrefix: endpoint.RegionLabelPathPrefix(clusterID), + etcdClient: client, + ruleStorage: storage, + 
regionLabeler: labelerManager, + } + err = rw.initializeRegionLabelWatcher() + re.NoError(err) + re.Len(labelerManager.GetAllLabelRules(), rulesNum) + cancel() +} + +func prepare(t require.TestingT) (context.Context, *clientv3.Client, func()) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + cfg := etcdutil.NewTestSingleConfig() + cfg.Dir = os.TempDir() + "/test_etcd" + os.RemoveAll(cfg.Dir) + etcd, err := embed.StartEtcd(cfg) + re.NoError(err) + client, err := etcdutil.CreateEtcdClient(nil, cfg.LCUrls) + re.NoError(err) + <-etcd.Server.ReadyNotify() + + for i := 1; i < rulesNum+1; i++ { + rule := &labeler.LabelRule{ + ID: "test_" + strconv.Itoa(i), + Labels: []labeler.RegionLabel{{Key: "test", Value: "test"}}, + RuleType: labeler.KeyRange, + Data: keyspace.MakeKeyRanges(uint32(i)), + } + value, err := json.Marshal(rule) + re.NoError(err) + key := endpoint.RegionLabelPathPrefix(clusterID) + "/" + rule.ID + _, err = clientv3.NewKV(client).Put(ctx, key, string(value)) + re.NoError(err) + } + + return ctx, client, func() { + cancel() + client.Close() + etcd.Close() + os.RemoveAll(cfg.Dir) + } +} diff --git a/pkg/mcs/utils/util.go b/pkg/mcs/utils/util.go index 6fa6c2cb08e..0e587688fce 100644 --- a/pkg/mcs/utils/util.go +++ b/pkg/mcs/utils/util.go @@ -177,12 +177,12 @@ func InitClient(s server) error { if err != nil { return err } - etcdClient, httpClient, err := etcdutil.CreateClients(tlsConfig, backendUrls) + etcdClient, err := etcdutil.CreateEtcdClient(tlsConfig, backendUrls) if err != nil { return err } s.SetETCDClient(etcdClient) - s.SetHTTPClient(httpClient) + s.SetHTTPClient(etcdutil.CreateHTTPClient(tlsConfig)) return nil } diff --git a/pkg/member/member.go b/pkg/member/member.go index 4e532270700..8d0eb978c50 100644 --- a/pkg/member/member.go +++ b/pkg/member/member.go @@ -355,7 +355,7 @@ func (m *EmbeddedEtcdMember) ResignEtcdLeader(ctx context.Context, from string, log.Info("try to resign etcd leader to next pd-server", 
zap.String("from", from), zap.String("to", nextEtcdLeader)) // Determine next etcd leader candidates. var etcdLeaderIDs []uint64 - res, err := etcdutil.ListEtcdMembers(m.client) + res, err := etcdutil.ListEtcdMembers(ctx, m.client) if err != nil { return err } diff --git a/pkg/schedule/config/config.go b/pkg/schedule/config/config.go index 90a37c93d91..8411a84bb1b 100644 --- a/pkg/schedule/config/config.go +++ b/pkg/schedule/config/config.go @@ -553,13 +553,11 @@ type SchedulerConfig struct { var DefaultSchedulers = SchedulerConfigs{ {Type: "balance-region"}, {Type: "balance-leader"}, - {Type: "balance-witness"}, {Type: "hot-region"}, - {Type: "transfer-witness-leader"}, {Type: "evict-slow-store"}, } -// IsDefaultScheduler checks whether the scheduler is enable by default. +// IsDefaultScheduler checks whether the scheduler is enabled by default. func IsDefaultScheduler(typ string) bool { for _, c := range DefaultSchedulers { if typ == c.Type { diff --git a/pkg/schedule/labeler/labeler.go b/pkg/schedule/labeler/labeler.go index 39722b1a038..aeb4ff7b2f9 100644 --- a/pkg/schedule/labeler/labeler.go +++ b/pkg/schedule/labeler/labeler.go @@ -24,6 +24,7 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/schedule/rangelist" "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/storage/kv" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/syncutil" "go.uber.org/zap" @@ -88,18 +89,19 @@ func (l *RegionLabeler) checkAndClearExpiredLabels() { continue } if len(rule.Labels) == 0 { - err = l.storage.DeleteRegionRule(key) - delete(l.labelRules, key) - deleted = true + err = l.DeleteLabelRuleLocked(key) + if err == nil { + deleted = true + } } else { - err = l.storage.SaveRegionRule(key, rule) + err = l.SaveLabelRuleLocked(rule) } if err != nil { log.Error("failed to save rule expired label rule", zap.String("rule-key", key), zap.Error(err)) } } if deleted { - l.buildRangeList() + l.BuildRangeListLocked() } } @@ -123,16 +125,19 @@ 
func (l *RegionLabeler) loadRules() error { if err != nil { return err } - for _, d := range toDelete { - if err = l.storage.DeleteRegionRule(d); err != nil { + for _, id := range toDelete { + if err := l.storage.RunInTxn(l.ctx, func(txn kv.Txn) error { + return l.storage.DeleteRegionRule(txn, id) + }); err != nil { return err } } - l.buildRangeList() + l.BuildRangeListLocked() return nil } -func (l *RegionLabeler) buildRangeList() { +// BuildRangeListLocked builds the range list. +func (l *RegionLabeler) BuildRangeListLocked() { builder := rangelist.NewBuilder() l.minExpire = nil for _, rule := range l.labelRules { @@ -196,29 +201,44 @@ func (l *RegionLabeler) getAndCheckRule(id string, now time.Time) *LabelRule { return rule } if len(rule.Labels) == 0 { - l.storage.DeleteRegionRule(id) - delete(l.labelRules, id) + l.DeleteLabelRuleLocked(id) return nil } - l.storage.SaveRegionRule(id, rule) + l.SaveLabelRuleLocked(rule) return rule } // SetLabelRule inserts or updates a LabelRule. func (l *RegionLabeler) SetLabelRule(rule *LabelRule) error { + l.Lock() + defer l.Unlock() + if err := l.SetLabelRuleLocked(rule); err != nil { + return err + } + l.BuildRangeListLocked() + return nil +} + +// SetLabelRuleLocked inserts or updates a LabelRule but not buildRangeList. +func (l *RegionLabeler) SetLabelRuleLocked(rule *LabelRule) error { if err := rule.checkAndAdjust(); err != nil { return err } - l.Lock() - defer l.Unlock() - if err := l.storage.SaveRegionRule(rule.ID, rule); err != nil { + if err := l.SaveLabelRuleLocked(rule); err != nil { return err } l.labelRules[rule.ID] = rule - l.buildRangeList() return nil } +// SaveLabelRuleLocked inserts or updates a LabelRule but not buildRangeList. +// It only saves the rule to storage, and does not update the in-memory states. 
+func (l *RegionLabeler) SaveLabelRuleLocked(rule *LabelRule) error { + return l.storage.RunInTxn(l.ctx, func(txn kv.Txn) error { + return l.storage.SaveRegionRule(txn, rule.ID, rule) + }) +} + // DeleteLabelRule removes a LabelRule. func (l *RegionLabeler) DeleteLabelRule(id string) error { l.Lock() @@ -226,32 +246,57 @@ func (l *RegionLabeler) DeleteLabelRule(id string) error { if _, ok := l.labelRules[id]; !ok { return errs.ErrRegionRuleNotFound.FastGenByArgs(id) } - if err := l.storage.DeleteRegionRule(id); err != nil { + if err := l.DeleteLabelRuleLocked(id); err != nil { + return err + } + l.BuildRangeListLocked() + return nil +} + +// DeleteLabelRuleLocked removes a LabelRule but not buildRangeList. +func (l *RegionLabeler) DeleteLabelRuleLocked(id string) error { + if err := l.storage.RunInTxn(l.ctx, func(txn kv.Txn) error { + return l.storage.DeleteRegionRule(txn, id) + }); err != nil { return err } delete(l.labelRules, id) - l.buildRangeList() return nil } // Patch updates multiple region rules in a batch. func (l *RegionLabeler) Patch(patch LabelRulePatch) error { + // setRulesMap is used to solve duplicate entries in DeleteRules and SetRules. + // Note: We maintain compatibility with the previous behavior, which is to process DeleteRules before SetRules + // If there are duplicate rules, we will prioritize SetRules and select the last one from SetRules. 
+ setRulesMap := make(map[string]*LabelRule) + for _, rule := range patch.SetRules { if err := rule.checkAndAdjust(); err != nil { return err } + setRulesMap[rule.ID] = rule } // save to storage + var batch []func(kv.Txn) error for _, key := range patch.DeleteRules { - if err := l.storage.DeleteRegionRule(key); err != nil { - return err + if _, ok := setRulesMap[key]; ok { + continue } + localKey := key + batch = append(batch, func(txn kv.Txn) error { + return l.storage.DeleteRegionRule(txn, localKey) + }) } - for _, rule := range patch.SetRules { - if err := l.storage.SaveRegionRule(rule.ID, rule); err != nil { - return err - } + for _, rule := range setRulesMap { + localID, localRule := rule.ID, rule + batch = append(batch, func(txn kv.Txn) error { + return l.storage.SaveRegionRule(txn, localID, localRule) + }) + } + if err := endpoint.RunBatchOpInTxn(l.ctx, l.storage, batch); err != nil { + return err } // update in-memory states. @@ -261,10 +306,10 @@ func (l *RegionLabeler) Patch(patch LabelRulePatch) error { for _, key := range patch.DeleteRules { delete(l.labelRules, key) } - for _, rule := range patch.SetRules { + for _, rule := range setRulesMap { l.labelRules[rule.ID] = rule } - l.buildRangeList() + l.BuildRangeListLocked() return nil } diff --git a/pkg/schedule/labeler/labeler_test.go b/pkg/schedule/labeler/labeler_test.go index f38c6321c01..87773ce892d 100644 --- a/pkg/schedule/labeler/labeler_test.go +++ b/pkg/schedule/labeler/labeler_test.go @@ -20,14 +20,17 @@ import ( "encoding/json" "fmt" "sort" + "strconv" "testing" "time" "github.com/pingcap/failpoint" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" + "github.com/tikv/pd/pkg/utils/etcdutil" ) func TestAdjustRule(t *testing.T) { @@ -132,6 +135,78 @@ func TestGetSetRule(t *testing.T) { for id, rule := range allRules { expectSameRules(re, rule, rules[id+1]) } + + for 
_, r := range rules { + labeler.DeleteLabelRule(r.ID) + } + re.Empty(labeler.GetAllLabelRules()) +} + +func TestTxnWithEtcd(t *testing.T) { + re := require.New(t) + _, client, clean := etcdutil.NewTestEtcdCluster(t, 1) + defer clean() + store := storage.NewStorageWithEtcdBackend(client, "") + labeler, err := NewRegionLabeler(context.Background(), store, time.Millisecond*10) + re.NoError(err) + // test patch rules in batch + rulesNum := 200 + patch := LabelRulePatch{} + for i := 1; i <= rulesNum; i++ { + patch.SetRules = append(patch.SetRules, &LabelRule{ + ID: fmt.Sprintf("rule_%d", i), + Labels: []RegionLabel{ + {Key: fmt.Sprintf("k_%d", i), Value: fmt.Sprintf("v_%d", i)}, + }, + RuleType: "key-range", + Data: MakeKeyRanges("", ""), + }) + } + err = labeler.Patch(patch) + re.NoError(err) + allRules := labeler.GetAllLabelRules() + re.Len(allRules, rulesNum) + sort.Slice(allRules, func(i, j int) bool { + i1, err := strconv.Atoi(allRules[i].ID[5:]) + re.NoError(err) + j1, err := strconv.Atoi(allRules[j].ID[5:]) + re.NoError(err) + return i1 < j1 + }) + for id, rule := range allRules { + expectSameRules(re, rule, patch.SetRules[id]) + } + patch.SetRules = patch.SetRules[:0] + patch.DeleteRules = patch.DeleteRules[:0] + for i := 1; i <= rulesNum; i++ { + patch.DeleteRules = append(patch.DeleteRules, fmt.Sprintf("rule_%d", i)) + } + err = labeler.Patch(patch) + re.NoError(err) + allRules = labeler.GetAllLabelRules() + re.Empty(allRules) + + // test patch rules in batch with duplicated rule id + patch.SetRules = patch.SetRules[:0] + patch.DeleteRules = patch.DeleteRules[:0] + for i := 0; i <= 3; i++ { + patch.SetRules = append(patch.SetRules, &LabelRule{ + ID: "rule_1", + Labels: []RegionLabel{ + {Key: fmt.Sprintf("k_%d", i), Value: fmt.Sprintf("v_%d", i)}, + }, + RuleType: "key-range", + Data: MakeKeyRanges("", ""), + }) + } + patch.DeleteRules = append(patch.DeleteRules, "rule_1") + err = labeler.Patch(patch) + re.NoError(err) + allRules = labeler.GetAllLabelRules() + 
re.Len(allRules, 1) + re.Equal("rule_1", allRules[0].ID) + re.Len(allRules[0].Labels, 1) + re.Equal("k_3", allRules[0].Labels[0].Key) } func TestIndex(t *testing.T) { diff --git a/pkg/schedule/operator/operator.go b/pkg/schedule/operator/operator.go index a8c54e824fb..b87a050969f 100644 --- a/pkg/schedule/operator/operator.go +++ b/pkg/schedule/operator/operator.go @@ -149,6 +149,39 @@ func (o *Operator) MarshalJSON() ([]byte, error) { return []byte(`"` + o.String() + `"`), nil } +// OpObject is used to return Operator as a json object for API. +type OpObject struct { + Desc string `json:"desc"` + Brief string `json:"brief"` + RegionID uint64 `json:"region_id"` + RegionEpoch *metapb.RegionEpoch `json:"region_epoch"` + Kind OpKind `json:"kind"` + Timeout string `json:"timeout"` + Status OpStatus `json:"status"` +} + +// ToJSONObject serializes Operator as JSON object. +func (o *Operator) ToJSONObject() *OpObject { + var status OpStatus + if o.CheckSuccess() { + status = SUCCESS + } else if o.CheckTimeout() { + status = TIMEOUT + } else { + status = o.Status() + } + + return &OpObject{ + Desc: o.desc, + Brief: o.brief, + RegionID: o.regionID, + RegionEpoch: o.regionEpoch, + Kind: o.kind, + Timeout: o.timeout.String(), + Status: status, + } +} + // Desc returns the operator's short description. func (o *Operator) Desc() string { return o.desc diff --git a/pkg/schedule/operator/operator_test.go b/pkg/schedule/operator/operator_test.go index 1b0ff8385bf..4719df9408b 100644 --- a/pkg/schedule/operator/operator_test.go +++ b/pkg/schedule/operator/operator_test.go @@ -541,3 +541,36 @@ func (suite *operatorTestSuite) TestRecord() { re.Equal(now, ob.FinishTime) re.Greater(ob.duration.Seconds(), time.Second.Seconds()) } + +func (suite *operatorTestSuite) TestToJSONObject() { + steps := []OpStep{ + AddPeer{ToStore: 1, PeerID: 1}, + TransferLeader{FromStore: 3, ToStore: 1}, + RemovePeer{FromStore: 3}, + } + op := suite.newTestOperator(101, OpLeader|OpRegion, steps...) 
+ op.Start() + obj := op.ToJSONObject() + suite.Equal("test", obj.Desc) + suite.Equal("test", obj.Brief) + suite.Equal(uint64(101), obj.RegionID) + suite.Equal(OpLeader|OpRegion, obj.Kind) + suite.Equal("12m0s", obj.Timeout) + suite.Equal(STARTED, obj.Status) + + // Test SUCCESS status. + region := suite.newTestRegion(1, 1, [2]uint64{1, 1}, [2]uint64{2, 2}) + suite.Nil(op.Check(region)) + suite.Equal(SUCCESS, op.Status()) + obj = op.ToJSONObject() + suite.Equal(SUCCESS, obj.Status) + + // Test TIMEOUT status. + steps = []OpStep{TransferLeader{FromStore: 2, ToStore: 1}} + op = suite.newTestOperator(1, OpLeader, steps...) + op.Start() + op.SetStatusReachTime(STARTED, op.GetStartTime().Add(-FastStepWaitTime-time.Second)) + suite.True(op.CheckTimeout()) + obj = op.ToJSONObject() + suite.Equal(TIMEOUT, obj.Status) +} diff --git a/pkg/schedule/placement/rule_manager.go b/pkg/schedule/placement/rule_manager.go index 0a66d82e865..f44258d797c 100644 --- a/pkg/schedule/placement/rule_manager.go +++ b/pkg/schedule/placement/rule_manager.go @@ -34,7 +34,6 @@ import ( "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" - "github.com/tikv/pd/pkg/utils/etcdutil" "github.com/tikv/pd/pkg/utils/syncutil" "go.uber.org/zap" "golang.org/x/exp/slices" @@ -202,7 +201,7 @@ func (m *RuleManager) loadRules() error { return m.storage.DeleteRule(txn, localKey) }) } - return m.runBatchOpInTxn(batch) + return endpoint.RunBatchOpInTxn(m.ctx, m.storage, batch) } func (m *RuleManager) loadGroups() error { @@ -311,7 +310,7 @@ func (m *RuleManager) SetRule(rule *Rule) error { defer m.Unlock() p := m.BeginPatch() p.SetRule(rule) - if err := m.TryCommitPatch(p); err != nil { + if err := m.TryCommitPatchLocked(p); err != nil { return err } log.Info("placement rule updated", zap.String("rule", fmt.Sprint(rule))) @@ -324,7 +323,7 @@ func (m *RuleManager) DeleteRule(group, id string) error { defer m.Unlock() p := m.BeginPatch() p.DeleteRule(group, 
id) - if err := m.TryCommitPatch(p); err != nil { + if err := m.TryCommitPatchLocked(p); err != nil { return err } log.Info("placement rule is removed", zap.String("group", group), zap.String("id", id)) @@ -469,8 +468,8 @@ func (m *RuleManager) BeginPatch() *RuleConfigPatch { return m.ruleConfig.beginPatch() } -// TryCommitPatch tries to commit a patch. -func (m *RuleManager) TryCommitPatch(patch *RuleConfigPatch) error { +// TryCommitPatchLocked tries to commit a patch. +func (m *RuleManager) TryCommitPatchLocked(patch *RuleConfigPatch) error { patch.adjust() ruleList, err := buildRuleList(patch) @@ -521,7 +520,7 @@ func (m *RuleManager) savePatch(p *ruleConfig) error { }) } } - return m.runBatchOpInTxn(batch) + return endpoint.RunBatchOpInTxn(m.ctx, m.storage, batch) } // SetRules inserts or updates lots of Rules at once. @@ -535,7 +534,7 @@ func (m *RuleManager) SetRules(rules []*Rule) error { } p.SetRule(r) } - if err := m.TryCommitPatch(p); err != nil { + if err := m.TryCommitPatchLocked(p); err != nil { return err } @@ -598,7 +597,7 @@ func (m *RuleManager) Batch(todo []RuleOp) error { } } - if err := m.TryCommitPatch(patch); err != nil { + if err := m.TryCommitPatchLocked(patch); err != nil { return err } @@ -634,7 +633,7 @@ func (m *RuleManager) SetRuleGroup(group *RuleGroup) error { defer m.Unlock() p := m.BeginPatch() p.SetGroup(group) - if err := m.TryCommitPatch(p); err != nil { + if err := m.TryCommitPatchLocked(p); err != nil { return err } log.Info("group config updated", zap.String("group", fmt.Sprint(group))) @@ -647,7 +646,7 @@ func (m *RuleManager) DeleteRuleGroup(id string) error { defer m.Unlock() p := m.BeginPatch() p.DeleteGroup(id) - if err := m.TryCommitPatch(p); err != nil { + if err := m.TryCommitPatchLocked(p); err != nil { return err } log.Info("group config reset", zap.String("group", id)) @@ -737,7 +736,7 @@ func (m *RuleManager) SetAllGroupBundles(groups []GroupBundle, override bool) er p.SetRule(r) } } - if err := 
m.TryCommitPatch(p); err != nil { + if err := m.TryCommitPatchLocked(p); err != nil { return err } log.Info("full config reset", zap.String("config", fmt.Sprint(groups))) @@ -768,7 +767,7 @@ func (m *RuleManager) SetGroupBundle(group GroupBundle) error { } p.SetRule(r) } - if err := m.TryCommitPatch(p); err != nil { + if err := m.TryCommitPatchLocked(p); err != nil { return err } log.Info("group is reset", zap.String("group", fmt.Sprint(group))) @@ -800,7 +799,7 @@ func (m *RuleManager) DeleteGroupBundle(id string, regex bool) error { p.DeleteGroup(g.ID) } } - if err := m.TryCommitPatch(p); err != nil { + if err := m.TryCommitPatchLocked(p); err != nil { return err } log.Info("groups are removed", zap.String("id", id), zap.Bool("regexp", regex)) @@ -814,28 +813,6 @@ func (m *RuleManager) IsInitialized() bool { return m.initialized } -func (m *RuleManager) runBatchOpInTxn(batch []func(kv.Txn) error) error { - // execute batch in transaction with limited operations per transaction - for start := 0; start < len(batch); start += etcdutil.MaxEtcdTxnOps { - end := start + etcdutil.MaxEtcdTxnOps - if end > len(batch) { - end = len(batch) - } - err := m.storage.RunInTxn(m.ctx, func(txn kv.Txn) (err error) { - for _, op := range batch[start:end] { - if err = op(txn); err != nil { - return err - } - } - return nil - }) - if err != nil { - return err - } - } - return nil -} - // checkRule check the rule whether will have RuleFit after FitRegion // in order to reduce the calculation. func checkRule(rule *Rule, stores []*core.StoreInfo) bool { diff --git a/pkg/schedule/prepare_checker.go b/pkg/schedule/prepare_checker.go index 7843249229b..126e3bba41d 100644 --- a/pkg/schedule/prepare_checker.go +++ b/pkg/schedule/prepare_checker.go @@ -60,6 +60,10 @@ func (checker *prepareChecker) check(c *core.BasicCluster, collectWaitTime ...ti continue } storeID := store.GetID() + // It is used to avoid sudden scheduling when scheduling service is just started. 
+ if len(collectWaitTime) > 0 && (float64(store.GetStoreStats().GetRegionCount())*collectFactor > float64(c.GetNotFromStorageRegionsCntByStore(storeID))) { + return false + } // For each store, the number of active regions should be more than total region of the store * collectFactor if float64(c.GetStoreRegionCount(storeID))*collectFactor > float64(c.GetNotFromStorageRegionsCntByStore(storeID)) { return false diff --git a/pkg/schedule/schedulers/scheduler_controller.go b/pkg/schedule/schedulers/scheduler_controller.go index 818f02685ea..5953ecac5e3 100644 --- a/pkg/schedule/schedulers/scheduler_controller.go +++ b/pkg/schedule/schedulers/scheduler_controller.go @@ -48,7 +48,7 @@ type Controller struct { ctx context.Context cluster sche.SchedulerCluster storage endpoint.ConfigStorage - // schedulers is used to manage all schedulers, which will only be initialized + // schedulers are used to manage all schedulers, which will only be initialized // and used in the PD leader service mode now. schedulers map[string]*ScheduleController // schedulerHandlers is used to manage the HTTP handlers of schedulers, diff --git a/pkg/storage/endpoint/rule.go b/pkg/storage/endpoint/rule.go index ad245f527bb..b0827fda477 100644 --- a/pkg/storage/endpoint/rule.go +++ b/pkg/storage/endpoint/rule.go @@ -27,16 +27,17 @@ type RuleStorage interface { LoadRule(ruleKey string) (string, error) LoadRules(f func(k, v string)) error LoadRuleGroups(f func(k, v string)) error + LoadRegionRules(f func(k, v string)) error + // We need to use txn to avoid concurrent modification. // And it is helpful for the scheduling server to watch the rule. 
SaveRule(txn kv.Txn, ruleKey string, rule interface{}) error DeleteRule(txn kv.Txn, ruleKey string) error SaveRuleGroup(txn kv.Txn, groupID string, group interface{}) error DeleteRuleGroup(txn kv.Txn, groupID string) error + SaveRegionRule(txn kv.Txn, ruleKey string, rule interface{}) error + DeleteRegionRule(txn kv.Txn, ruleKey string) error - LoadRegionRules(f func(k, v string)) error - SaveRegionRule(ruleKey string, rule interface{}) error - DeleteRegionRule(ruleKey string) error RunInTxn(ctx context.Context, f func(txn kv.Txn) error) error } @@ -73,13 +74,13 @@ func (se *StorageEndpoint) LoadRegionRules(f func(k, v string)) error { } // SaveRegionRule saves a region rule to the storage. -func (se *StorageEndpoint) SaveRegionRule(ruleKey string, rule interface{}) error { - return se.saveJSON(regionLabelKeyPath(ruleKey), rule) +func (se *StorageEndpoint) SaveRegionRule(txn kv.Txn, ruleKey string, rule interface{}) error { + return saveJSONInTxn(txn, regionLabelKeyPath(ruleKey), rule) } // DeleteRegionRule removes a region rule from storage. -func (se *StorageEndpoint) DeleteRegionRule(ruleKey string) error { - return se.Remove(regionLabelKeyPath(ruleKey)) +func (se *StorageEndpoint) DeleteRegionRule(txn kv.Txn, ruleKey string) error { + return txn.Remove(regionLabelKeyPath(ruleKey)) } // LoadRule load a placement rule from storage. 
diff --git a/pkg/storage/endpoint/util.go b/pkg/storage/endpoint/util.go index 62b170a1a8e..cf1e4ef2315 100644 --- a/pkg/storage/endpoint/util.go +++ b/pkg/storage/endpoint/util.go @@ -15,12 +15,14 @@ package endpoint import ( + "context" "encoding/json" "strings" "github.com/gogo/protobuf/proto" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/storage/kv" + "github.com/tikv/pd/pkg/utils/etcdutil" "go.etcd.io/etcd/clientv3" ) @@ -74,3 +76,31 @@ func (se *StorageEndpoint) loadRangeByPrefix(prefix string, f func(k, v string)) nextKey = keys[len(keys)-1] + "\x00" } } + +// TxnStorage is the interface with RunInTxn +type TxnStorage interface { + RunInTxn(ctx context.Context, f func(txn kv.Txn) error) error +} + +// RunBatchOpInTxn runs a batch of operations in transaction. +// The batch is split into multiple transactions if it exceeds the maximum number of operations per transaction. +func RunBatchOpInTxn(ctx context.Context, storage TxnStorage, batch []func(kv.Txn) error) error { + for start := 0; start < len(batch); start += etcdutil.MaxEtcdTxnOps { + end := start + etcdutil.MaxEtcdTxnOps + if end > len(batch) { + end = len(batch) + } + err := storage.RunInTxn(ctx, func(txn kv.Txn) (err error) { + for _, op := range batch[start:end] { + if err = op(txn); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index c48c066a2aa..77967a97ef7 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -22,7 +22,6 @@ import ( "net/http" "regexp" "sort" - "strings" "sync" "time" @@ -365,8 +364,6 @@ type KeyspaceGroupManager struct { // cfg is the TSO config cfg ServiceConfig - // loadKeyspaceGroupsTimeout is the timeout for loading the initial keyspace group assignment. 
- loadKeyspaceGroupsTimeout time.Duration loadKeyspaceGroupsBatchSize int64 loadFromEtcdMaxRetryTimes int @@ -485,8 +482,6 @@ func (kgm *KeyspaceGroupManager) GetServiceConfig() ServiceConfig { // Key: /ms/{cluster_id}/tso/registry/{tsoServerAddress} // Value: discover.ServiceRegistryEntry func (kgm *KeyspaceGroupManager) InitializeTSOServerWatchLoop() error { - tsoServiceEndKey := clientv3.GetPrefixRangeEnd(kgm.tsoServiceKey) + "/" - putFn := func(kv *mvccpb.KeyValue) error { s := &discovery.ServiceRegistryEntry{} if err := json.Unmarshal(kv.Value, s); err != nil { @@ -518,7 +513,7 @@ func (kgm *KeyspaceGroupManager) InitializeTSOServerWatchLoop() error { putFn, deleteFn, func([]*clientv3.Event) error { return nil }, - clientv3.WithRange(tsoServiceEndKey), + true, /* withPrefix */ ) kgm.tsoNodesWatcher.StartWatchLoop() if err := kgm.tsoNodesWatcher.WaitLoad(); err != nil { @@ -535,9 +530,7 @@ func (kgm *KeyspaceGroupManager) InitializeTSOServerWatchLoop() error { // Value: endpoint.KeyspaceGroup func (kgm *KeyspaceGroupManager) InitializeGroupWatchLoop() error { rootPath := kgm.legacySvcRootPath - startKey := strings.Join([]string{rootPath, endpoint.KeyspaceGroupIDPath(mcsutils.DefaultKeyspaceGroupID)}, "/") - endKey := strings.Join( - []string{rootPath, clientv3.GetPrefixRangeEnd(endpoint.KeyspaceGroupIDPrefix())}, "/") + startKey := rootPath + "/" + endpoint.KeyspaceGroupIDPrefix() defaultKGConfigured := false putFn := func(kv *mvccpb.KeyValue) error { @@ -577,11 +570,8 @@ func (kgm *KeyspaceGroupManager) InitializeGroupWatchLoop() error { putFn, deleteFn, postEventsFn, - clientv3.WithRange(endKey), + true, /* withPrefix */ ) - if kgm.loadKeyspaceGroupsTimeout > 0 { - kgm.groupWatcher.SetLoadTimeout(kgm.loadKeyspaceGroupsTimeout) - } if kgm.loadFromEtcdMaxRetryTimes > 0 { kgm.groupWatcher.SetLoadRetryTimes(kgm.loadFromEtcdMaxRetryTimes) } diff --git a/pkg/tso/keyspace_group_manager_test.go b/pkg/tso/keyspace_group_manager_test.go index 54a1adc6b34..d3d5f8256e6 
100644 --- a/pkg/tso/keyspace_group_manager_test.go +++ b/pkg/tso/keyspace_group_manager_test.go @@ -31,7 +31,6 @@ import ( "github.com/pingcap/failpoint" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mcs/discovery" mcsutils "github.com/tikv/pd/pkg/mcs/utils" "github.com/tikv/pd/pkg/storage/endpoint" @@ -228,29 +227,6 @@ func (suite *keyspaceGroupManagerTestSuite) TestLoadWithDifferentBatchSize() { } } -// TestLoadKeyspaceGroupsTimeout tests there is timeout when loading the initial keyspace group assignment -// from etcd. The initialization of the keyspace group manager should fail. -func (suite *keyspaceGroupManagerTestSuite) TestLoadKeyspaceGroupsTimeout() { - re := suite.Require() - - mgr := suite.newUniqueKeyspaceGroupManager(1) - re.NotNil(mgr) - defer mgr.Close() - - addKeyspaceGroupAssignment( - suite.ctx, suite.etcdClient, uint32(0), mgr.legacySvcRootPath, - []string{mgr.tsoServiceID.ServiceAddr}, []int{0}, []uint32{0}) - - // Set the timeout to 1 second and inject the delayLoad to return 3 seconds to let - // the loading sleep 3 seconds. - mgr.loadKeyspaceGroupsTimeout = time.Second - re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/etcdutil/delayLoad", "return(3)")) - err := mgr.Initialize() - // If loading keyspace groups timeout, the initialization should fail with ErrLoadKeyspaceGroupsTerminated. - re.Contains(err.Error(), errs.ErrLoadKeyspaceGroupsTerminated.Error()) - re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/utils/etcdutil/delayLoad")) -} - // TestLoadKeyspaceGroupsSucceedWithTempFailures tests the initialization should succeed when there are temporary // failures during loading the initial keyspace group assignment from etcd. 
func (suite *keyspaceGroupManagerTestSuite) TestLoadKeyspaceGroupsSucceedWithTempFailures() { diff --git a/pkg/utils/etcdutil/etcdutil.go b/pkg/utils/etcdutil/etcdutil.go index 11c640fe4ef..7db4d1ea56a 100644 --- a/pkg/utils/etcdutil/etcdutil.go +++ b/pkg/utils/etcdutil/etcdutil.go @@ -17,10 +17,10 @@ package etcdutil import ( "context" "crypto/tls" - "fmt" "math/rand" "net/http" "net/url" + "strings" "sync" "time" @@ -39,6 +39,7 @@ import ( "go.etcd.io/etcd/mvcc/mvccpb" "go.etcd.io/etcd/pkg/types" "go.uber.org/zap" + "google.golang.org/grpc/codes" ) const ( @@ -112,13 +113,13 @@ func AddEtcdMember(client *clientv3.Client, urls []string) (*clientv3.MemberAddR } // ListEtcdMembers returns a list of internal etcd members. -func ListEtcdMembers(client *clientv3.Client) (*clientv3.MemberListResponse, error) { +func ListEtcdMembers(ctx context.Context, client *clientv3.Client) (*clientv3.MemberListResponse, error) { failpoint.Inject("SlowEtcdMemberList", func(val failpoint.Value) { d := val.(int) time.Sleep(time.Duration(d) * time.Second) }) - ctx, cancel := context.WithTimeout(client.Ctx(), DefaultRequestTimeout) - listResp, err := client.MemberList(ctx) + newCtx, cancel := context.WithTimeout(ctx, DefaultRequestTimeout) + listResp, err := client.MemberList(newCtx) cancel() if err != nil { return listResp, errs.ErrEtcdMemberList.Wrap(err).GenWithStackByCause() @@ -269,209 +270,13 @@ func CreateEtcdClient(tlsConfig *tls.Config, acURLs []url.URL) (*clientv3.Client failpoint.Inject("closeTick", func() { failpoint.Return(client, err) }) - - checker := &healthyChecker{ - tlsConfig: tlsConfig, - } - eps := syncUrls(client) - checker.update(eps) - - // Create a goroutine to check the health of etcd endpoints periodically. 
- go func(client *clientv3.Client) { - defer logutil.LogPanic() - ticker := time.NewTicker(tickerInterval) - defer ticker.Stop() - lastAvailable := time.Now() - for { - select { - case <-client.Ctx().Done(): - log.Info("etcd client is closed, exit health check goroutine") - checker.Range(func(key, value interface{}) bool { - client := value.(*healthyClient) - client.Close() - return true - }) - return - case <-ticker.C: - usedEps := client.Endpoints() - healthyEps := checker.patrol(client.Ctx()) - if len(healthyEps) == 0 { - // when all endpoints are unhealthy, try to reset endpoints to update connect - // rather than delete them to avoid there is no any endpoint in client. - // Note: reset endpoints will trigger subconn closed, and then trigger reconnect. - // otherwise, the subconn will be retrying in grpc layer and use exponential backoff, - // and it cannot recover as soon as possible. - if time.Since(lastAvailable) > etcdServerDisconnectedTimeout { - log.Info("no available endpoint, try to reset endpoints", zap.Strings("last-endpoints", usedEps)) - client.SetEndpoints([]string{}...) - client.SetEndpoints(usedEps...) - } - } else { - if !typeutil.AreStringSlicesEquivalent(healthyEps, usedEps) { - client.SetEndpoints(healthyEps...) - change := fmt.Sprintf("%d->%d", len(usedEps), len(healthyEps)) - etcdStateGauge.WithLabelValues("endpoints").Set(float64(len(healthyEps))) - log.Info("update endpoints", zap.String("num-change", change), - zap.Strings("last-endpoints", usedEps), zap.Strings("endpoints", client.Endpoints())) - } - lastAvailable = time.Now() - } - } - } - }(client) - - // Notes: use another goroutine to update endpoints to avoid blocking health check in the first goroutine. 
- go func(client *clientv3.Client) { - defer logutil.LogPanic() - ticker := time.NewTicker(tickerInterval) - defer ticker.Stop() - for { - select { - case <-client.Ctx().Done(): - log.Info("etcd client is closed, exit update endpoint goroutine") - return - case <-ticker.C: - eps := syncUrls(client) - checker.update(eps) - } - } - }(client) + initHealthChecker(tickerInterval, tlsConfig, client) return client, err } -type healthyClient struct { - *clientv3.Client - lastHealth time.Time -} - -type healthyChecker struct { - sync.Map // map[string]*healthyClient - tlsConfig *tls.Config -} - -func (checker *healthyChecker) patrol(ctx context.Context) []string { - // See https://github.com/etcd-io/etcd/blob/85b640cee793e25f3837c47200089d14a8392dc7/etcdctl/ctlv3/command/ep_command.go#L105-L145 - var wg sync.WaitGroup - count := 0 - checker.Range(func(key, value interface{}) bool { - count++ - return true - }) - hch := make(chan string, count) - healthyList := make([]string, 0, count) - checker.Range(func(key, value interface{}) bool { - wg.Add(1) - go func(key, value interface{}) { - defer wg.Done() - defer logutil.LogPanic() - ep := key.(string) - client := value.(*healthyClient) - if IsHealthy(ctx, client.Client) { - hch <- ep - checker.Store(ep, &healthyClient{ - Client: client.Client, - lastHealth: time.Now(), - }) - return - } - }(key, value) - return true - }) - wg.Wait() - close(hch) - for h := range hch { - healthyList = append(healthyList, h) - } - return healthyList -} - -func (checker *healthyChecker) update(eps []string) { - epMap := make(map[string]struct{}) - for _, ep := range eps { - epMap[ep] = struct{}{} - } - - for ep := range epMap { - // check if client exists, if not, create one, if exists, check if it's offline or disconnected. 
- if client, ok := checker.Load(ep); ok { - lastHealthy := client.(*healthyClient).lastHealth - if time.Since(lastHealthy) > etcdServerOfflineTimeout { - log.Info("some etcd server maybe offline", zap.String("endpoint", ep)) - checker.removeClient(ep) - } - if time.Since(lastHealthy) > etcdServerDisconnectedTimeout { - // try to reset client endpoint to trigger reconnect - client.(*healthyClient).Client.SetEndpoints([]string{}...) - client.(*healthyClient).Client.SetEndpoints(ep) - } - continue - } - checker.addClient(ep, time.Now()) - } - - // check if there are some stale clients, if exists, remove them. - checker.Range(func(key, value interface{}) bool { - ep := key.(string) - if _, ok := epMap[ep]; !ok { - log.Info("remove stale etcd client", zap.String("endpoint", ep)) - checker.removeClient(ep) - } - return true - }) -} - -func (checker *healthyChecker) addClient(ep string, lastHealth time.Time) { - client, err := newClient(checker.tlsConfig, ep) - if err != nil { - log.Error("failed to create etcd healthy client", zap.Error(err)) - return - } - checker.Store(ep, &healthyClient{ - Client: client, - lastHealth: lastHealth, - }) -} - -func (checker *healthyChecker) removeClient(ep string) { - if client, ok := checker.LoadAndDelete(ep); ok { - err := client.(*healthyClient).Close() - if err != nil { - log.Error("failed to close etcd healthy client", zap.Error(err)) - } - } -} - -func syncUrls(client *clientv3.Client) []string { - // See https://github.com/etcd-io/etcd/blob/85b640cee793e25f3837c47200089d14a8392dc7/clientv3/client.go#L170-L183 - ctx, cancel := context.WithTimeout(clientv3.WithRequireLeader(client.Ctx()), DefaultRequestTimeout) - defer cancel() - mresp, err := client.MemberList(ctx) - if err != nil { - log.Error("failed to list members", errs.ZapError(err)) - return []string{} - } - var eps []string - for _, m := range mresp.Members { - if len(m.Name) != 0 && !m.IsLearner { - eps = append(eps, m.ClientURLs...) 
- } - } - return eps -} - -// CreateClients creates etcd v3 client and http client. -func CreateClients(tlsConfig *tls.Config, acUrls []url.URL) (*clientv3.Client, *http.Client, error) { - client, err := CreateEtcdClient(tlsConfig, acUrls) - if err != nil { - return nil, nil, errs.ErrNewEtcdClient.Wrap(err).GenWithStackByCause() - } - httpClient := createHTTPClient(tlsConfig) - return client, httpClient, nil -} - -// createHTTPClient creates a http client with the given tls config. -func createHTTPClient(tlsConfig *tls.Config) *http.Client { +// CreateHTTPClient creates a http client with the given tls config. +func CreateHTTPClient(tlsConfig *tls.Config) *http.Client { // FIXME: Currently, there is no timeout set for certain requests, such as GetRegions, // which may take a significant amount of time. However, it might be necessary to // define an appropriate timeout in the future. @@ -558,12 +363,10 @@ func InitOrGetClusterID(c *clientv3.Client, key string) (uint64, error) { } const ( - defaultLoadDataFromEtcdTimeout = 30 * time.Second - defaultLoadFromEtcdRetryInterval = 200 * time.Millisecond - defaultLoadFromEtcdRetryTimes = int(defaultLoadDataFromEtcdTimeout / defaultLoadFromEtcdRetryInterval) - defaultLoadBatchSize = 400 - defaultWatchChangeRetryInterval = 1 * time.Second - defaultForceLoadMinimalInterval = 200 * time.Millisecond + defaultEtcdRetryInterval = time.Second + defaultLoadFromEtcdRetryTimes = 3 + maxLoadBatchSize = int64(10000) + minLoadBatchSize = int64(100) // RequestProgressInterval is the interval to call RequestProgress for watcher. RequestProgressInterval = 1 * time.Second @@ -580,8 +383,8 @@ type LoopWatcher struct { // key is the etcd key to watch. key string - // opts is used to set etcd options. - opts []clientv3.OpOption + // isWithPrefix indicates whether the watcher is with prefix. + isWithPrefix bool // forceLoadCh is used to force loading data from etcd. 
forceLoadCh chan struct{} @@ -602,8 +405,6 @@ type LoopWatcher struct { // lastTimeForceLoad is used to record the last time force loading data from etcd. lastTimeForceLoad time.Time - // loadTimeout is used to set the timeout for loading data from etcd. - loadTimeout time.Duration // loadRetryTimes is used to set the retry times for loading data from etcd. loadRetryTimes int // loadBatchSize is used to set the batch size for loading data from etcd. @@ -623,7 +424,7 @@ func NewLoopWatcher( preEventsFn func([]*clientv3.Event) error, putFn, deleteFn func(*mvccpb.KeyValue) error, postEventsFn func([]*clientv3.Event) error, - opts ...clientv3.OpOption, + isWithPrefix bool, ) *LoopWatcher { return &LoopWatcher{ ctx: ctx, @@ -638,12 +439,11 @@ func NewLoopWatcher( deleteFn: deleteFn, postEventsFn: postEventsFn, preEventsFn: preEventsFn, - opts: opts, + isWithPrefix: isWithPrefix, lastTimeForceLoad: time.Now(), - loadTimeout: defaultLoadDataFromEtcdTimeout, loadRetryTimes: defaultLoadFromEtcdRetryTimes, - loadBatchSize: defaultLoadBatchSize, - watchChangeRetryInterval: defaultWatchChangeRetryInterval, + loadBatchSize: maxLoadBatchSize, + watchChangeRetryInterval: defaultEtcdRetryInterval, } } @@ -689,11 +489,8 @@ func (lw *LoopWatcher) initFromEtcd(ctx context.Context) int64 { watchStartRevision int64 err error ) - ticker := time.NewTicker(defaultLoadFromEtcdRetryInterval) + ticker := time.NewTicker(defaultEtcdRetryInterval) defer ticker.Stop() - ctx, cancel := context.WithTimeout(ctx, lw.loadTimeout) - defer cancel() - for i := 0; i < lw.loadRetryTimes; i++ { failpoint.Inject("loadTemporaryFail", func(val failpoint.Value) { if maxFailTimes, ok := val.(int); ok && i < maxFailTimes { @@ -701,11 +498,6 @@ func (lw *LoopWatcher) initFromEtcd(ctx context.Context) int64 { failpoint.Continue() } }) - failpoint.Inject("delayLoad", func(val failpoint.Value) { - if sleepIntervalSeconds, ok := val.(int); ok && sleepIntervalSeconds > 0 { - 
time.Sleep(time.Duration(sleepIntervalSeconds) * time.Second) - } - }) watchStartRevision, err = lw.load(ctx) if err == nil { break @@ -754,7 +546,10 @@ func (lw *LoopWatcher) watch(ctx context.Context, revision int64) (nextRevision // make sure to wrap context with "WithRequireLeader". watcherCtx, cancel := context.WithCancel(clientv3.WithRequireLeader(ctx)) watcherCancel = cancel - opts := append(lw.opts, clientv3.WithRev(revision), clientv3.WithProgressNotify()) + opts := []clientv3.OpOption{clientv3.WithRev(revision), clientv3.WithProgressNotify()} + if lw.isWithPrefix { + opts = append(opts, clientv3.WithPrefix()) + } done := make(chan struct{}) go grpcutil.CheckStream(watcherCtx, watcherCancel, done) watchChan := watcher.Watch(watcherCtx, lw.key, opts...) @@ -864,15 +659,10 @@ func (lw *LoopWatcher) watch(ctx context.Context, revision int64) (nextRevision } func (lw *LoopWatcher) load(ctx context.Context) (nextRevision int64, err error) { - ctx, cancel := context.WithTimeout(ctx, DefaultRequestTimeout) - defer cancel() startKey := lw.key - // If limit is 0, it means no limit. - // If limit is not 0, we need to add 1 to limit to get the next key. limit := lw.loadBatchSize - if limit != 0 { - limit++ - } + opts := lw.buildLoadingOpts(limit) + if err := lw.preEventsFn([]*clientv3.Event{}); err != nil { log.Error("run pre event failed in watch loop", zap.String("name", lw.name), zap.String("key", lw.key), zap.Error(err)) @@ -883,21 +673,43 @@ func (lw *LoopWatcher) load(ctx context.Context) (nextRevision int64, err error) zap.String("key", lw.key), zap.Error(err)) } }() + for { - // Sort by key to get the next key and we don't need to worry about the performance, - // Because the default sort is just SortByKey and SortAscend - opts := append(lw.opts, clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend), clientv3.WithLimit(limit)) - resp, err := clientv3.NewKV(lw.client).Get(ctx, startKey, opts...) 
+ select { + case <-ctx.Done(): + return 0, nil + default: + } + resp, err := EtcdKVGet(lw.client, startKey, opts...) + failpoint.Inject("meetEtcdError", func() { + if limit > minLoadBatchSize { + err = errors.New(codes.ResourceExhausted.String()) + } + }) if err != nil { log.Error("load failed in watch loop", zap.String("name", lw.name), zap.String("key", lw.key), zap.Error(err)) + if strings.Contains(err.Error(), codes.ResourceExhausted.String()) || + strings.Contains(err.Error(), codes.DeadlineExceeded.String()) { + if limit == 0 { + limit = maxLoadBatchSize + } else if limit > minLoadBatchSize { + limit /= 2 + } else { + return 0, err + } + opts = lw.buildLoadingOpts(limit) + continue + } return 0, err } for i, item := range resp.Kvs { - if resp.More && i == len(resp.Kvs)-1 { - // The last key is the start key of the next batch. - // To avoid to get the same key in the next load, we need to skip the last key. + if i == len(resp.Kvs)-1 && resp.More { + // If there are more keys, we need to load the next batch. + // The last key in current batch is the start key of the next batch. startKey = string(item.Key) + // To avoid to get the same key in the next batch, + // we need to skip the last key for the current batch. continue } err = lw.putFn(item) @@ -916,6 +728,27 @@ func (lw *LoopWatcher) load(ctx context.Context) (nextRevision int64, err error) } } +func (lw *LoopWatcher) buildLoadingOpts(limit int64) []clientv3.OpOption { + // Sort by key to get the next key and we don't need to worry about the performance, + // Because the default sort is just SortByKey and SortAscend + opts := []clientv3.OpOption{ + clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)} + // In most cases, 'Get(foo, WithPrefix())' is equivalent to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. + // However, when the startKey changes, the two are no longer equivalent. + // For example, the end key for 'WithRange(GetPrefixRangeEnd(foo))' is consistently 'fop'. 
+ // But when using 'Get(foo1, WithPrefix())', the end key becomes 'foo2', not 'fop'. + // So, we use 'WithRange()' to avoid this problem. + if lw.isWithPrefix { + opts = append(opts, clientv3.WithRange(clientv3.GetPrefixRangeEnd(lw.key))) + } + // If limit is 0, it means no limit. + // If limit is not 0, we need to add 1 to limit to get the next key. + if limit == 0 { + return opts + } + return append(opts, clientv3.WithLimit(limit+1)) +} + // ForceLoad forces to load the key. func (lw *LoopWatcher) ForceLoad() { // When NotLeader error happens, a large volume of force load requests will be received here, @@ -923,14 +756,14 @@ func (lw *LoopWatcher) ForceLoad() { // Two-phase locking is also used to let most of the requests return directly without acquiring // the write lock and causing the system to choke. lw.forceLoadMu.RLock() - if time.Since(lw.lastTimeForceLoad) < defaultForceLoadMinimalInterval { + if time.Since(lw.lastTimeForceLoad) < defaultEtcdRetryInterval { lw.forceLoadMu.RUnlock() return } lw.forceLoadMu.RUnlock() lw.forceLoadMu.Lock() - if time.Since(lw.lastTimeForceLoad) < defaultForceLoadMinimalInterval { + if time.Since(lw.lastTimeForceLoad) < defaultEtcdRetryInterval { lw.forceLoadMu.Unlock() return } @@ -953,11 +786,6 @@ func (lw *LoopWatcher) SetLoadRetryTimes(times int) { lw.loadRetryTimes = times } -// SetLoadTimeout sets the timeout when loading data from etcd. -func (lw *LoopWatcher) SetLoadTimeout(timeout time.Duration) { - lw.loadTimeout = timeout -} - // SetLoadBatchSize sets the batch size when loading data from etcd. 
func (lw *LoopWatcher) SetLoadBatchSize(size int64) { lw.loadBatchSize = size diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index d415d2d1873..55af4c92a2d 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -53,7 +53,7 @@ func TestMemberHelpers(t *testing.T) { etcd1, cfg1 := servers[0], servers[0].Config() // Test ListEtcdMembers - listResp1, err := ListEtcdMembers(client1) + listResp1, err := ListEtcdMembers(client1.Ctx(), client1) re.NoError(err) re.Len(listResp1.Members, 1) // types.ID is an alias of uint64. @@ -74,7 +74,7 @@ func TestMemberHelpers(t *testing.T) { _, err = RemoveEtcdMember(client1, uint64(etcd2.Server.ID())) re.NoError(err) - listResp3, err := ListEtcdMembers(client1) + listResp3, err := ListEtcdMembers(client1.Ctx(), client1) re.NoError(err) re.Len(listResp3.Members, 1) re.Equal(uint64(etcd1.Server.ID()), listResp3.Members[0].ID) @@ -380,7 +380,8 @@ func (suite *loopWatcherTestSuite) SetupSuite() { suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cleans = make([]func(), 0) // Start a etcd server and create a client with etcd1 as endpoint. 
- suite.config = newTestSingleConfig(suite.T()) + suite.config = NewTestSingleConfig() + suite.config.Dir = suite.T().TempDir() suite.startEtcd(re) suite.client, err = CreateEtcdClient(nil, suite.config.LCUrls) re.NoError(err) @@ -397,36 +398,57 @@ func (suite *loopWatcherTestSuite) TearDownSuite() { } } -func (suite *loopWatcherTestSuite) TestLoadWithoutKey() { +func (suite *loopWatcherTestSuite) TestLoadNoExistedKey() { re := suite.Require() - cache := struct { - syncutil.RWMutex - data map[string]struct{} - }{ - data: make(map[string]struct{}), - } + cache := make(map[string]struct{}) watcher := NewLoopWatcher( suite.ctx, &suite.wg, suite.client, "test", - "TestLoadWithoutKey", + "TestLoadNoExistedKey", func([]*clientv3.Event) error { return nil }, func(kv *mvccpb.KeyValue) error { - cache.Lock() - defer cache.Unlock() - cache.data[string(kv.Key)] = struct{}{} + cache[string(kv.Key)] = struct{}{} return nil }, func(kv *mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, + false, /* withPrefix */ ) watcher.StartWatchLoop() err := watcher.WaitLoad() re.NoError(err) // although no key, watcher returns no error - cache.RLock() - defer cache.RUnlock() - re.Empty(cache.data) + re.Empty(cache) +} + +func (suite *loopWatcherTestSuite) TestLoadWithLimitChange() { + re := suite.Require() + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/utils/etcdutil/meetEtcdError", `return()`)) + cache := make(map[string]struct{}) + for i := 0; i < int(maxLoadBatchSize)*2; i++ { + suite.put(re, fmt.Sprintf("TestLoadWithLimitChange%d", i), "") + } + watcher := NewLoopWatcher( + suite.ctx, + &suite.wg, + suite.client, + "test", + "TestLoadWithLimitChange", + func([]*clientv3.Event) error { return nil }, + func(kv *mvccpb.KeyValue) error { + cache[string(kv.Key)] = struct{}{} + return nil + }, + func(kv *mvccpb.KeyValue) error { return nil }, + func([]*clientv3.Event) error { return nil }, + true, /* withPrefix */ + ) + watcher.StartWatchLoop() + err := 
watcher.WaitLoad() + re.NoError(err) + re.Len(cache, int(maxLoadBatchSize)*2) + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/utils/etcdutil/meetEtcdError")) } func (suite *loopWatcherTestSuite) TestCallBack() { @@ -464,7 +486,7 @@ func (suite *loopWatcherTestSuite) TestCallBack() { result = result[:0] return nil }, - clientv3.WithPrefix(), + true, /* withPrefix */ ) watcher.StartWatchLoop() err := watcher.WaitLoad() @@ -499,12 +521,7 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { for i := 0; i < count; i++ { suite.put(re, fmt.Sprintf("TestWatcherLoadLimit%d", i), "") } - cache := struct { - syncutil.RWMutex - data []string - }{ - data: make([]string, 0), - } + cache := make([]string, 0) watcher := NewLoopWatcher( ctx, &suite.wg, @@ -513,9 +530,7 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { "TestWatcherLoadLimit", func([]*clientv3.Event) error { return nil }, func(kv *mvccpb.KeyValue) error { - cache.Lock() - defer cache.Unlock() - cache.data = append(cache.data, string(kv.Key)) + cache = append(cache, string(kv.Key)) return nil }, func(kv *mvccpb.KeyValue) error { @@ -524,19 +539,53 @@ func (suite *loopWatcherTestSuite) TestWatcherLoadLimit() { func([]*clientv3.Event) error { return nil }, - clientv3.WithPrefix(), + true, /* withPrefix */ ) + watcher.SetLoadBatchSize(int64(limit)) watcher.StartWatchLoop() err := watcher.WaitLoad() re.NoError(err) - cache.RLock() - re.Len(cache.data, count) - cache.RUnlock() + re.Len(cache, count) cancel() } } } +func (suite *loopWatcherTestSuite) TestWatcherLoadLargeKey() { + re := suite.Require() + // use default limit to test 65536 key in etcd + count := 65536 + ctx, cancel := context.WithCancel(suite.ctx) + defer cancel() + for i := 0; i < count; i++ { + suite.put(re, fmt.Sprintf("TestWatcherLoadLargeKey/test-%d", i), "") + } + cache := make([]string, 0) + watcher := NewLoopWatcher( + ctx, + &suite.wg, + suite.client, + "test", + "TestWatcherLoadLargeKey", + func([]*clientv3.Event) error 
{ return nil }, + func(kv *mvccpb.KeyValue) error { + cache = append(cache, string(kv.Key)) + return nil + }, + func(kv *mvccpb.KeyValue) error { + return nil + }, + func([]*clientv3.Event) error { + return nil + }, + true, /* withPrefix */ + ) + watcher.StartWatchLoop() + err := watcher.WaitLoad() + re.NoError(err) + re.Len(cache, count) +} + func (suite *loopWatcherTestSuite) TestWatcherBreak() { re := suite.Require() cache := struct { @@ -568,6 +617,7 @@ func (suite *loopWatcherTestSuite) TestWatcherBreak() { }, func(kv *mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, + false, /* withPrefix */ ) watcher.watchChangeRetryInterval = 100 * time.Millisecond watcher.StartWatchLoop() @@ -646,6 +696,7 @@ func (suite *loopWatcherTestSuite) TestWatcherRequestProgress() { func(kv *mvccpb.KeyValue) error { return nil }, func(kv *mvccpb.KeyValue) error { return nil }, func([]*clientv3.Event) error { return nil }, + false, /* withPrefix */ ) suite.wg.Add(1) diff --git a/pkg/utils/etcdutil/health_checker.go b/pkg/utils/etcdutil/health_checker.go new file mode 100644 index 00000000000..9ba7efa5903 --- /dev/null +++ b/pkg/utils/etcdutil/health_checker.go @@ -0,0 +1,282 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdutil + +import ( + "context" + "crypto/tls" + "fmt" + "sync" + "time" + + "github.com/pingcap/log" + "github.com/tikv/pd/pkg/errs" + "github.com/tikv/pd/pkg/utils/logutil" + "github.com/tikv/pd/pkg/utils/typeutil" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +// healthyClient will wrap an etcd client and record its last health time. +// The etcd client inside will only maintain one connection to the etcd server +// to make sure each healthyClient could be used to check the health of a certain +// etcd endpoint without involving the load balancer of etcd client. +type healthyClient struct { + *clientv3.Client + lastHealth time.Time +} + +// healthChecker is used to check the health of etcd endpoints. Inside the checker, +// we will maintain a map from each available etcd endpoint to its healthyClient. +type healthChecker struct { + tickerInterval time.Duration + tlsConfig *tls.Config + + // Store as endpoint(string) -> *healthyClient + healthyClients sync.Map + // client is the etcd client the health checker is guarding, it will be set with + // the checked healthy endpoints dynamically and periodically. + client *clientv3.Client +} + +// initHealthChecker initializes the health checker for etcd client. +func initHealthChecker(tickerInterval time.Duration, tlsConfig *tls.Config, client *clientv3.Client) { + healthChecker := &healthChecker{ + tickerInterval: tickerInterval, + tlsConfig: tlsConfig, + client: client, + } + // A health checker has the same lifetime with the given etcd client. + ctx := client.Ctx() + // Sync etcd endpoints and check the last health time of each endpoint periodically. + go healthChecker.syncer(ctx) + // Inspect the health of each endpoint by reading the health key periodically. 
+ go healthChecker.inspector(ctx) +} + +func (checker *healthChecker) syncer(ctx context.Context) { + defer logutil.LogPanic() + checker.update() + ticker := time.NewTicker(checker.tickerInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + log.Info("etcd client is closed, exit update endpoint goroutine") + return + case <-ticker.C: + checker.update() + } + } +} + +func (checker *healthChecker) inspector(ctx context.Context) { + defer logutil.LogPanic() + ticker := time.NewTicker(checker.tickerInterval) + defer ticker.Stop() + lastAvailable := time.Now() + for { + select { + case <-ctx.Done(): + log.Info("etcd client is closed, exit health check goroutine") + checker.close() + return + case <-ticker.C: + lastEps := checker.client.Endpoints() + healthyEps := checker.patrol(ctx) + if len(healthyEps) == 0 { + // when no endpoint could be used, try to reset endpoints to update connect rather + // than delete them to avoid there is no any endpoint in client. + // Note: reset endpoints will trigger sub-connection closed, and then trigger reconnection. + // Otherwise, the sub-connection will be retrying in gRPC layer and use exponential backoff, + // and it cannot recover as soon as possible. + if time.Since(lastAvailable) > etcdServerDisconnectedTimeout { + log.Info("no available endpoint, try to reset endpoints", + zap.Strings("last-endpoints", lastEps)) + resetClientEndpoints(checker.client, lastEps...) + } + } else { + if !typeutil.AreStringSlicesEquivalent(healthyEps, lastEps) { + oldNum, newNum := len(lastEps), len(healthyEps) + checker.client.SetEndpoints(healthyEps...) 
+ etcdStateGauge.WithLabelValues("endpoints").Set(float64(newNum)) + log.Info("update endpoints", + zap.String("num-change", fmt.Sprintf("%d->%d", oldNum, newNum)), + zap.Strings("last-endpoints", lastEps), + zap.Strings("endpoints", checker.client.Endpoints())) + } + lastAvailable = time.Now() + } + } + } +} + +func (checker *healthChecker) close() { + checker.healthyClients.Range(func(key, value interface{}) bool { + client := value.(*healthyClient) + client.Close() + return true + }) +} + +// Reset the etcd client endpoints to trigger reconnect. +func resetClientEndpoints(client *clientv3.Client, endpoints ...string) { + client.SetEndpoints() + client.SetEndpoints(endpoints...) +} + +// See https://github.com/etcd-io/etcd/blob/85b640cee793e25f3837c47200089d14a8392dc7/etcdctl/ctlv3/command/ep_command.go#L105-L145 +func (checker *healthChecker) patrol(ctx context.Context) []string { + var ( + count = checker.clientCount() + hch = make(chan string, count) + healthyList = make([]string, 0, count) + wg sync.WaitGroup + ) + checker.healthyClients.Range(func(key, value interface{}) bool { + wg.Add(1) + go func(key, value interface{}) { + defer wg.Done() + defer logutil.LogPanic() + var ( + ep = key.(string) + client = value.(*healthyClient) + ) + if IsHealthy(ctx, client.Client) { + hch <- ep + checker.storeClient(ep, &healthyClient{ + Client: client.Client, + lastHealth: time.Now(), + }) + return + } + }(key, value) + return true + }) + wg.Wait() + close(hch) + for h := range hch { + healthyList = append(healthyList, h) + } + return healthyList +} + +func (checker *healthChecker) update() { + eps := syncUrls(checker.client) + if len(eps) == 0 { + log.Warn("no available etcd endpoint returned by etcd cluster") + return + } + epMap := make(map[string]struct{}, len(eps)) + for _, ep := range eps { + epMap[ep] = struct{}{} + } + // Check if client exists: + // - If not, create one. + // - If exists, check if it's offline or disconnected for a long time. 
+ for ep := range epMap { + client := checker.loadClient(ep) + if client == nil { + checker.addClient(ep, time.Now()) + continue + } + since := time.Since(client.lastHealth) + // Check if it's offline for a long time and try to remove it. + if since > etcdServerOfflineTimeout { + log.Info("etcd server might be offline, try to remove it", + zap.Duration("since-last-health", since), + zap.String("endpoint", ep)) + checker.removeClient(ep) + continue + } + // Check if it's disconnected for a long time and try to reconnect. + if since > etcdServerDisconnectedTimeout { + log.Info("etcd server might be disconnected, try to reconnect", + zap.Duration("since-last-health", since), + zap.String("endpoint", ep)) + resetClientEndpoints(client.Client, ep) + } + } + // Clean up the stale clients which are not in the etcd cluster anymore. + checker.healthyClients.Range(func(key, value interface{}) bool { + ep := key.(string) + if _, ok := epMap[ep]; !ok { + log.Info("remove stale etcd client", zap.String("endpoint", ep)) + checker.removeClient(ep) + } + return true + }) +} + +func (checker *healthChecker) clientCount() int { + count := 0 + checker.healthyClients.Range(func(_, _ interface{}) bool { + count++ + return true + }) + return count +} + +func (checker *healthChecker) loadClient(ep string) *healthyClient { + if client, ok := checker.healthyClients.Load(ep); ok { + return client.(*healthyClient) + } + return nil +} + +func (checker *healthChecker) addClient(ep string, lastHealth time.Time) { + client, err := newClient(checker.tlsConfig, ep) + if err != nil { + log.Error("failed to create etcd healthy client", + zap.String("endpoint", ep), + zap.Error(err)) + return + } + checker.healthyClients.Store(ep, &healthyClient{ + Client: client, + lastHealth: lastHealth, + }) +} + +func (checker *healthChecker) storeClient(ep string, client *healthyClient) { + checker.healthyClients.Store(ep, client) +} + +func (checker *healthChecker) removeClient(ep string) { + if client, ok := 
checker.healthyClients.LoadAndDelete(ep); ok { + err := client.(*healthyClient).Close() + if err != nil { + log.Error("failed to close etcd healthy client", + zap.String("endpoint", ep), + zap.Error(err)) + } + } +} + +// See https://github.com/etcd-io/etcd/blob/85b640cee793e25f3837c47200089d14a8392dc7/clientv3/client.go#L170-L183 +func syncUrls(client *clientv3.Client) (eps []string) { + resp, err := ListEtcdMembers(clientv3.WithRequireLeader(client.Ctx()), client) + if err != nil { + log.Error("failed to list members", errs.ZapError(err)) + return nil + } + for _, m := range resp.Members { + if len(m.Name) == 0 || m.IsLearner { + continue + } + eps = append(eps, m.ClientURLs...) + } + return eps +} diff --git a/pkg/utils/etcdutil/testutil.go b/pkg/utils/etcdutil/testutil.go index 54ba38b93b6..57f7200ecb8 100644 --- a/pkg/utils/etcdutil/testutil.go +++ b/pkg/utils/etcdutil/testutil.go @@ -29,11 +29,10 @@ import ( "go.etcd.io/etcd/etcdserver/etcdserverpb" ) -// newTestSingleConfig is used to create a etcd config for the unit test purpose. -func newTestSingleConfig(t *testing.T) *embed.Config { +// NewTestSingleConfig is used to create a etcd config for the unit test purpose. +func NewTestSingleConfig() *embed.Config { cfg := embed.NewConfig() cfg.Name = genRandName() - cfg.Dir = t.TempDir() cfg.WalDir = "" cfg.Logger = "zap" cfg.LogOutputs = []string{"stdout"} @@ -60,7 +59,8 @@ func NewTestEtcdCluster(t *testing.T, count int) (servers []*embed.Etcd, etcdCli re := require.New(t) servers = make([]*embed.Etcd, 0, count) - cfg := newTestSingleConfig(t) + cfg := NewTestSingleConfig() + cfg.Dir = t.TempDir() etcd, err := embed.StartEtcd(cfg) re.NoError(err) etcdClient, err = CreateEtcdClient(nil, cfg.LCUrls) @@ -70,7 +70,7 @@ func NewTestEtcdCluster(t *testing.T, count int) (servers []*embed.Etcd, etcdCli for i := 1; i < count; i++ { // Check the client can get the new member. 
- listResp, err := ListEtcdMembers(etcdClient) + listResp, err := ListEtcdMembers(etcdClient.Ctx(), etcdClient) re.NoError(err) re.Len(listResp.Members, i) // Add a new member. @@ -98,7 +98,8 @@ func NewTestEtcdCluster(t *testing.T, count int) (servers []*embed.Etcd, etcdCli // MustAddEtcdMember is used to add a new etcd member to the cluster for test. func MustAddEtcdMember(t *testing.T, cfg1 *embed.Config, client *clientv3.Client) *embed.Etcd { re := require.New(t) - cfg2 := newTestSingleConfig(t) + cfg2 := NewTestSingleConfig() + cfg2.Dir = t.TempDir() cfg2.Name = genRandName() cfg2.InitialCluster = cfg1.InitialCluster + fmt.Sprintf(",%s=%s", cfg2.Name, &cfg2.LPUrls[0]) cfg2.ClusterState = embed.ClusterStateFlagExisting @@ -107,7 +108,7 @@ func MustAddEtcdMember(t *testing.T, cfg1 *embed.Config, client *clientv3.Client re.NoError(err) // Check the client can get the new member. testutil.Eventually(re, func() bool { - members, err := ListEtcdMembers(client) + members, err := ListEtcdMembers(client.Ctx(), client) re.NoError(err) return len(addResp.Members) == len(members.Members) }) @@ -121,7 +122,7 @@ func MustAddEtcdMember(t *testing.T, cfg1 *embed.Config, client *clientv3.Client func checkMembers(re *require.Assertions, client *clientv3.Client, etcds []*embed.Etcd) { // Check the client can get the new member. - listResp, err := ListEtcdMembers(client) + listResp, err := ListEtcdMembers(client.Ctx(), client) re.NoError(err) re.Len(listResp.Members, len(etcds)) inList := func(m *etcdserverpb.Member) bool { diff --git a/scripts/ci-subtask.sh b/scripts/ci-subtask.sh index 8fb5a3dc827..40f2863e1f9 100755 --- a/scripts/ci-subtask.sh +++ b/scripts/ci-subtask.sh @@ -8,20 +8,14 @@ if [[ $2 -gt 10 ]]; then integrations_dir=./tests/integrations integrations_tasks=($(find "$integrations_dir" -mindepth 1 -maxdepth 1 -type d)) # Currently, we only have 3 integration tests, so we can hardcode the task index. 
- for t in ${integrations_tasks[@]}; do - if [[ "$t" = "$integrations_dir/client" && "$2" = 11 ]]; then - cd ./client && make ci-test-job && cd .. && cat ./client/covprofile >> covprofile - cd $integrations_dir && make ci-test-job test_name=client - cd $ROOT_PATH && cat $integrations_dir/client/covprofile >> covprofile - break - elif [[ "$t" = "$integrations_dir/tso" && "$2" = 12 ]]; then - cd $integrations_dir && make ci-test-job test_name=tso - cd $ROOT_PATH && cat $integrations_dir/tso/covprofile >> covprofile - break - elif [[ "$t" = "$integrations_dir/mcs" && "$2" = 13 ]]; then - cd $integrations_dir && make ci-test-job test_name=mcs - cd $ROOT_PATH && cat $integrations_dir/mcs/covprofile >> covprofile - break + for t in "${integrations_tasks[@]}"; do + if [[ "$t" = "$integrations_dir/client" && $2 -eq 11 ]]; then + cd ./client && make ci-test-job && cd .. && cat ./covprofile >> covprofile || exit 1 + cd $integrations_dir && make ci-test-job test_name=client && cat ./client/covprofile >> "$ROOT_PATH/covprofile" || exit 1 + elif [[ "$t" = "$integrations_dir/tso" && $2 -eq 12 ]]; then + cd $integrations_dir && make ci-test-job test_name=tso && cat ./tso/covprofile >> "$ROOT_PATH/covprofile" || exit 1 + elif [[ "$t" = "$integrations_dir/mcs" && $2 -eq 13 ]]; then + cd $integrations_dir && make ci-test-job test_name=mcs && cat ./mcs/covprofile >> "$ROOT_PATH/covprofile" || exit 1 fi done else diff --git a/server/api/config.go b/server/api/config.go index f87331d5e09..6037de650a0 100644 --- a/server/api/config.go +++ b/server/api/config.go @@ -181,6 +181,8 @@ func (h *confHandler) updateConfig(cfg *config.Config, key string, value interfa case "label-property": // TODO: support changing label-property case "keyspace": return h.updateKeyspaceConfig(cfg, kp[len(kp)-1], value) + case "micro-service": + return h.updateMicroServiceConfig(cfg, kp[len(kp)-1], value) } return errors.Errorf("config prefix %s not found", kp[0]) } @@ -201,6 +203,22 @@ func (h *confHandler) 
updateKeyspaceConfig(config *config.Config, key string, va return err } +func (h *confHandler) updateMicroServiceConfig(config *config.Config, key string, value interface{}) error { + updated, found, err := jsonutil.AddKeyValue(&config.MicroService, key, value) + if err != nil { + return err + } + + if !found { + return errors.Errorf("config item %s not found", key) + } + + if updated { + err = h.svr.SetMicroServiceConfig(config.MicroService) + } + return err +} + func (h *confHandler) updateSchedule(config *config.Config, key string, value interface{}) error { updated, found, err := jsonutil.AddKeyValue(&config.Schedule, key, value) if err != nil { diff --git a/server/api/member.go b/server/api/member.go index 3016b76088b..df8c0aee622 100644 --- a/server/api/member.go +++ b/server/api/member.go @@ -128,7 +128,7 @@ func (h *memberHandler) DeleteMemberByName(w http.ResponseWriter, r *http.Reques // Get etcd ID by name. var id uint64 name := mux.Vars(r)["name"] - listResp, err := etcdutil.ListEtcdMembers(client) + listResp, err := etcdutil.ListEtcdMembers(client.Ctx(), client) if err != nil { h.rd.JSON(w, http.StatusInternalServerError, err.Error()) return diff --git a/server/api/operator.go b/server/api/operator.go index 7ff7d2d7c51..049a343d3bd 100644 --- a/server/api/operator.go +++ b/server/api/operator.go @@ -66,7 +66,8 @@ func (h *operatorHandler) GetOperatorsByRegion(w http.ResponseWriter, r *http.Re // @Tags operator // @Summary List pending operators. -// @Param kind query string false "Specify the operator kind." Enums(admin, leader, region) +// @Param kind query string false "Specify the operator kind." Enums(admin, leader, region) +// @Param object query bool false "Whether to return as JSON object." // @Produce json // @Success 200 {array} operator.Operator // @Failure 500 {string} string "PD server failed to proceed the request." 
@@ -78,6 +79,7 @@ func (h *operatorHandler) GetOperators(w http.ResponseWriter, r *http.Request) { ) kinds, ok := r.URL.Query()["kind"] + _, objectFlag := r.URL.Query()["object"] if !ok { results, err = h.Handler.GetOperators() } else { @@ -88,7 +90,15 @@ func (h *operatorHandler) GetOperators(w http.ResponseWriter, r *http.Request) { h.r.JSON(w, http.StatusInternalServerError, err.Error()) return } - h.r.JSON(w, http.StatusOK, results) + if objectFlag { + objResults := make([]*operator.OpObject, len(results)) + for i, op := range results { + objResults[i] = op.ToJSONObject() + } + h.r.JSON(w, http.StatusOK, objResults) + } else { + h.r.JSON(w, http.StatusOK, results) + } } // FIXME: details of input json body params diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 20a4a7f0bfc..abb8af80a92 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -350,23 +350,19 @@ func (c *RaftCluster) Start(s Server) error { return nil } -var once sync.Once - func (c *RaftCluster) checkServices() { if c.isAPIServiceMode { servers, err := discovery.Discover(c.etcdClient, strconv.FormatUint(c.clusterID, 10), mcsutils.SchedulingServiceName) - if err != nil || len(servers) == 0 { + if c.opt.GetMicroServiceConfig().IsSchedulingFallbackEnabled() && (err != nil || len(servers) == 0) { c.startSchedulingJobs(c, c.hbstreams) c.independentServices.Delete(mcsutils.SchedulingServiceName) } else { - if c.stopSchedulingJobs() { + if c.stopSchedulingJobs() || c.coordinator == nil { c.initCoordinator(c.ctx, c, c.hbstreams) - } else { - once.Do(func() { - c.initCoordinator(c.ctx, c, c.hbstreams) - }) } - c.independentServices.Store(mcsutils.SchedulingServiceName, true) + if !c.IsServiceIndependent(mcsutils.SchedulingServiceName) { + c.independentServices.Store(mcsutils.SchedulingServiceName, true) + } } } else { c.startSchedulingJobs(c, c.hbstreams) @@ -2506,7 +2502,7 @@ func CheckHealth(client *http.Client, members []*pdpb.Member) map[uint64]*pdpb.M // 
GetMembers return a slice of Members. func GetMembers(etcdClient *clientv3.Client) ([]*pdpb.Member, error) { - listResp, err := etcdutil.ListEtcdMembers(etcdClient) + listResp, err := etcdutil.ListEtcdMembers(etcdClient.Ctx(), etcdClient) if err != nil { return nil, err } diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index d5931394c1b..cc913426ed4 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -3015,8 +3015,6 @@ func TestAddScheduler(t *testing.T) { re.NoError(controller.RemoveScheduler(schedulers.BalanceLeaderName)) re.NoError(controller.RemoveScheduler(schedulers.BalanceRegionName)) re.NoError(controller.RemoveScheduler(schedulers.HotRegionName)) - re.NoError(controller.RemoveScheduler(schedulers.BalanceWitnessName)) - re.NoError(controller.RemoveScheduler(schedulers.TransferWitnessLeaderName)) re.NoError(controller.RemoveScheduler(schedulers.EvictSlowStoreName)) re.Empty(controller.GetSchedulerNames()) @@ -3112,8 +3110,6 @@ func TestPersistScheduler(t *testing.T) { re.NoError(controller.RemoveScheduler(schedulers.BalanceLeaderName)) re.NoError(controller.RemoveScheduler(schedulers.BalanceRegionName)) re.NoError(controller.RemoveScheduler(schedulers.HotRegionName)) - re.NoError(controller.RemoveScheduler(schedulers.BalanceWitnessName)) - re.NoError(controller.RemoveScheduler(schedulers.TransferWitnessLeaderName)) re.NoError(controller.RemoveScheduler(schedulers.EvictSlowStoreName)) // only remains 2 items with independent config. 
re.Len(controller.GetSchedulerNames(), 2) @@ -3226,8 +3222,6 @@ func TestRemoveScheduler(t *testing.T) { re.NoError(controller.RemoveScheduler(schedulers.BalanceRegionName)) re.NoError(controller.RemoveScheduler(schedulers.HotRegionName)) re.NoError(controller.RemoveScheduler(schedulers.GrantLeaderName)) - re.NoError(controller.RemoveScheduler(schedulers.BalanceWitnessName)) - re.NoError(controller.RemoveScheduler(schedulers.TransferWitnessLeaderName)) re.NoError(controller.RemoveScheduler(schedulers.EvictSlowStoreName)) // all removed sches, _, err = storage.LoadAllSchedulerConfigs() diff --git a/server/config/config.go b/server/config/config.go index da6b0e29e07..25e13d59652 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -165,6 +165,8 @@ type Config struct { Keyspace KeyspaceConfig `toml:"keyspace" json:"keyspace"` + MicroService MicroServiceConfig `toml:"micro-service" json:"micro-service"` + Controller rm.ControllerConfig `toml:"controller" json:"controller"` } @@ -249,6 +251,8 @@ const ( defaultCheckRegionSplitInterval = 50 * time.Millisecond minCheckRegionSplitInterval = 1 * time.Millisecond maxCheckRegionSplitInterval = 100 * time.Millisecond + + defaultEnableSchedulingFallback = true ) // Special keys for Labels @@ -461,6 +465,8 @@ func (c *Config) Adjust(meta *toml.MetaData, reloading bool) error { c.Keyspace.adjust(configMetaData.Child("keyspace")) + c.MicroService.adjust(configMetaData.Child("micro-service")) + c.Security.Encryption.Adjust() if len(c.Log.Format) == 0 { @@ -847,6 +853,28 @@ func (c *DRAutoSyncReplicationConfig) adjust(meta *configutil.ConfigMetaData) { } } +// MicroServiceConfig is the configuration for micro service. 
+type MicroServiceConfig struct { + EnableSchedulingFallback bool `toml:"enable-scheduling-fallback" json:"enable-scheduling-fallback,string"` +} + +func (c *MicroServiceConfig) adjust(meta *configutil.ConfigMetaData) { + if !meta.IsDefined("enable-scheduling-fallback") { + c.EnableSchedulingFallback = defaultEnableSchedulingFallback + } +} + +// Clone returns a copy of micro service config. +func (c *MicroServiceConfig) Clone() *MicroServiceConfig { + cfg := *c + return &cfg +} + +// IsSchedulingFallbackEnabled returns whether to enable scheduling service fallback to api service. +func (c *MicroServiceConfig) IsSchedulingFallbackEnabled() bool { + return c.EnableSchedulingFallback +} + // KeyspaceConfig is the configuration for keyspace management. type KeyspaceConfig struct { // PreAlloc contains the keyspace to be allocated during keyspace manager initialization. diff --git a/server/config/persist_options.go b/server/config/persist_options.go index 0fa1804b879..e383f519e63 100644 --- a/server/config/persist_options.go +++ b/server/config/persist_options.go @@ -52,6 +52,7 @@ type PersistOptions struct { replicationMode atomic.Value labelProperty atomic.Value keyspace atomic.Value + microService atomic.Value storeConfig atomic.Value clusterVersion unsafe.Pointer } @@ -65,6 +66,7 @@ func NewPersistOptions(cfg *Config) *PersistOptions { o.replicationMode.Store(&cfg.ReplicationMode) o.labelProperty.Store(cfg.LabelProperty) o.keyspace.Store(&cfg.Keyspace) + o.microService.Store(&cfg.MicroService) // storeConfig will be fetched from TiKV later, // set it to an empty config here first. o.storeConfig.Store(&sc.StoreConfig{}) @@ -133,6 +135,16 @@ func (o *PersistOptions) SetKeyspaceConfig(cfg *KeyspaceConfig) { o.keyspace.Store(cfg) } +// GetMicroServiceConfig returns the micro service configuration. 
+func (o *PersistOptions) GetMicroServiceConfig() *MicroServiceConfig {
+ return o.microService.Load().(*MicroServiceConfig)
+}
+
+// SetMicroServiceConfig sets the micro service configuration.
+func (o *PersistOptions) SetMicroServiceConfig(cfg *MicroServiceConfig) {
+ o.microService.Store(cfg)
+}
+
 // GetStoreConfig returns the store config.
 func (o *PersistOptions) GetStoreConfig() *sc.StoreConfig {
 return o.storeConfig.Load().(*sc.StoreConfig)
@@ -768,6 +780,7 @@ func (o *PersistOptions) Persist(storage endpoint.ConfigStorage) error {
 ReplicationMode: *o.GetReplicationModeConfig(),
 LabelProperty: o.GetLabelPropertyConfig(),
 Keyspace: *o.GetKeyspaceConfig(),
+ MicroService: *o.GetMicroServiceConfig(),
 ClusterVersion: *o.GetClusterVersion(),
 },
 StoreConfig: *o.GetStoreConfig(),
@@ -799,6 +812,7 @@ func (o *PersistOptions) Reload(storage endpoint.ConfigStorage) error {
 o.replicationMode.Store(&cfg.ReplicationMode)
 o.labelProperty.Store(cfg.LabelProperty)
 o.keyspace.Store(&cfg.Keyspace)
+ o.microService.Store(&cfg.MicroService)
 o.storeConfig.Store(&cfg.StoreConfig)
 o.SetClusterVersion(&cfg.ClusterVersion)
 }
diff --git a/server/grpc_service.go b/server/grpc_service.go
index a238cc780aa..ef7020f7fee 100644
--- a/server/grpc_service.go
+++ b/server/grpc_service.go
@@ -1139,12 +1139,19 @@ func (s *GrpcServer) ReportBuckets(stream pdpb.PD_ReportBucketsServer) error {
 if buckets == nil || len(buckets.Keys) == 0 {
 continue
 }
+ var (
+ storeLabel string
+ storeAddress string
+ )
 store := rc.GetLeaderStoreByRegionID(buckets.GetRegionId())
 if store == nil {
- return errors.Errorf("the store of the bucket in region %v is not found ", buckets.GetRegionId())
+ // TiKV reports buckets right after the region heartbeat, so for a newly created region,
+ // PD may receive the bucket report before the first region heartbeat is handled.
+ // Therefore we should not return an error here.
+ log.Warn("the store of the bucket in region is not found ", zap.Uint64("region-id", buckets.GetRegionId())) + } else { + storeLabel = strconv.FormatUint(store.GetID(), 10) + storeAddress = store.GetAddress() } - storeLabel := strconv.FormatUint(store.GetID(), 10) - storeAddress := store.GetAddress() bucketReportCounter.WithLabelValues(storeAddress, storeLabel, "report", "recv").Inc() start := time.Now() diff --git a/server/join/join.go b/server/join/join.go index f22b61edfb2..d1711063313 100644 --- a/server/join/join.go +++ b/server/join/join.go @@ -128,7 +128,7 @@ func PrepareJoinCluster(cfg *config.Config) error { } defer client.Close() - listResp, err := etcdutil.ListEtcdMembers(client) + listResp, err := etcdutil.ListEtcdMembers(client.Ctx(), client) if err != nil { return err } @@ -171,7 +171,7 @@ func PrepareJoinCluster(cfg *config.Config) error { ) for i := 0; i < listMemberRetryTimes; i++ { - listResp, err = etcdutil.ListEtcdMembers(client) + listResp, err = etcdutil.ListEtcdMembers(client.Ctx(), client) if err != nil { return err } diff --git a/server/keyspace_service.go b/server/keyspace_service.go index 11d912a5f54..09b935c2f84 100644 --- a/server/keyspace_service.go +++ b/server/keyspace_service.go @@ -116,7 +116,7 @@ func (s *KeyspaceServer) WatchKeyspaces(request *keyspacepb.WatchKeyspacesReques putFn, deleteFn, postEventsFn, - clientv3.WithRange(clientv3.GetPrefixRangeEnd(startKey)), + true, /* withPrefix */ ) watcher.StartWatchLoop() if err := watcher.WaitLoad(); err != nil { diff --git a/server/server.go b/server/server.go index fc2cc7466d0..be886a56712 100644 --- a/server/server.go +++ b/server/server.go @@ -348,36 +348,16 @@ func (s *Server) startEtcd(ctx context.Context) error { return errs.ErrCancelStartEtcd.FastGenByArgs() } - // start client - s.client, s.httpClient, err = s.startClient() + // Start the etcd and HTTP clients, then init the member. 
+ err = s.startClient() if err != nil { return err } - - s.electionClient, err = s.startElectionClient() + err = s.initMember(newCtx, etcd) if err != nil { return err } - // update advertise peer urls. - etcdMembers, err := etcdutil.ListEtcdMembers(s.client) - if err != nil { - return err - } - etcdServerID := uint64(etcd.Server.ID()) - for _, m := range etcdMembers.Members { - if etcdServerID == m.ID { - etcdPeerURLs := strings.Join(m.PeerURLs, ",") - if s.cfg.AdvertisePeerUrls != etcdPeerURLs { - log.Info("update advertise peer urls", zap.String("from", s.cfg.AdvertisePeerUrls), zap.String("to", etcdPeerURLs)) - s.cfg.AdvertisePeerUrls = etcdPeerURLs - } - } - } - failpoint.Inject("memberNil", func() { - time.Sleep(1500 * time.Millisecond) - }) - s.member = member.NewMember(etcd, s.electionClient, etcdServerID) s.initGRPCServiceLabels() return nil } @@ -392,29 +372,51 @@ func (s *Server) initGRPCServiceLabels() { } } -func (s *Server) startClient() (*clientv3.Client, *http.Client, error) { +func (s *Server) startClient() error { tlsConfig, err := s.cfg.Security.ToTLSConfig() if err != nil { - return nil, nil, err + return err } etcdCfg, err := s.cfg.GenEmbedEtcdConfig() if err != nil { - return nil, nil, err + return err } - return etcdutil.CreateClients(tlsConfig, etcdCfg.ACUrls) -} - -func (s *Server) startElectionClient() (*clientv3.Client, error) { - tlsConfig, err := s.cfg.Security.ToTLSConfig() + /* Starting two different etcd clients here is to avoid the throttling. */ + // This etcd client will be used to access the etcd cluster to read and write all kinds of meta data. + s.client, err = etcdutil.CreateEtcdClient(tlsConfig, etcdCfg.ACUrls) if err != nil { - return nil, err + return errs.ErrNewEtcdClient.Wrap(err).GenWithStackByCause() } - etcdCfg, err := s.cfg.GenEmbedEtcdConfig() + // This etcd client will only be used to read and write the election-related data, such as leader key. 
+ s.electionClient, err = etcdutil.CreateEtcdClient(tlsConfig, etcdCfg.ACUrls) if err != nil { - return nil, err + return errs.ErrNewEtcdClient.Wrap(err).GenWithStackByCause() } + s.httpClient = etcdutil.CreateHTTPClient(tlsConfig) + return nil +} - return etcdutil.CreateEtcdClient(tlsConfig, etcdCfg.ACUrls) +func (s *Server) initMember(ctx context.Context, etcd *embed.Etcd) error { + // Update advertise peer URLs. + etcdMembers, err := etcdutil.ListEtcdMembers(ctx, s.client) + if err != nil { + return err + } + etcdServerID := uint64(etcd.Server.ID()) + for _, m := range etcdMembers.Members { + if etcdServerID == m.ID { + etcdPeerURLs := strings.Join(m.PeerURLs, ",") + if s.cfg.AdvertisePeerUrls != etcdPeerURLs { + log.Info("update advertise peer urls", zap.String("from", s.cfg.AdvertisePeerUrls), zap.String("to", etcdPeerURLs)) + s.cfg.AdvertisePeerUrls = etcdPeerURLs + } + } + } + failpoint.Inject("memberNil", func() { + time.Sleep(1500 * time.Millisecond) + }) + s.member = member.NewMember(etcd, s.electionClient, etcdServerID) + return nil } // AddStartCallback adds a callback in the startServer phase. @@ -952,6 +954,7 @@ func (s *Server) GetConfig() *config.Config { cfg.PDServerCfg = *s.persistOptions.GetPDServerConfig().Clone() cfg.ReplicationMode = *s.persistOptions.GetReplicationModeConfig() cfg.Keyspace = *s.persistOptions.GetKeyspaceConfig().Clone() + cfg.MicroService = *s.persistOptions.GetMicroServiceConfig().Clone() cfg.LabelProperty = s.persistOptions.GetLabelPropertyConfig().Clone() cfg.ClusterVersion = *s.persistOptions.GetClusterVersion() if s.storage == nil { @@ -990,6 +993,27 @@ func (s *Server) SetKeyspaceConfig(cfg config.KeyspaceConfig) error { return nil } +// GetMicroServiceConfig gets the micro service config information. +func (s *Server) GetMicroServiceConfig() *config.MicroServiceConfig { + return s.persistOptions.GetMicroServiceConfig().Clone() +} + +// SetMicroServiceConfig sets the micro service config information. 
+func (s *Server) SetMicroServiceConfig(cfg config.MicroServiceConfig) error { + old := s.persistOptions.GetMicroServiceConfig() + s.persistOptions.SetMicroServiceConfig(&cfg) + if err := s.persistOptions.Persist(s.storage); err != nil { + s.persistOptions.SetMicroServiceConfig(old) + log.Error("failed to update micro service config", + zap.Reflect("new", cfg), + zap.Reflect("old", old), + errs.ZapError(err)) + return err + } + log.Info("micro service config is updated", zap.Reflect("new", cfg), zap.Reflect("old", old)) + return nil +} + // GetScheduleConfig gets the balance config information. func (s *Server) GetScheduleConfig() *sc.ScheduleConfig { return s.persistOptions.GetScheduleConfig().Clone() @@ -2034,6 +2058,7 @@ func (s *Server) initServicePrimaryWatcher(serviceName string, primaryKey string putFn, deleteFn, func([]*clientv3.Event) error { return nil }, + false, /* withPrefix */ ) } diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index cfc37f42628..07e5c65d286 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -105,6 +105,7 @@ func TestClientLeaderChange(t *testing.T) { endpoints := runServer(re, cluster) cli := setupCli(re, ctx, endpoints) + defer cli.Close() innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) @@ -165,6 +166,7 @@ func TestLeaderTransfer(t *testing.T) { endpoints := runServer(re, cluster) cli := setupCli(re, ctx, endpoints) + defer cli.Close() var lastTS uint64 testutil.Eventually(re, func() bool { @@ -254,6 +256,7 @@ func TestTSOAllocatorLeader(t *testing.T) { allocatorLeaderMap[dcLocation] = pdName } cli := setupCli(re, ctx, endpoints) + defer cli.Close() innerCli, ok := cli.(interface{ GetServiceDiscovery() pd.ServiceDiscovery }) re.True(ok) @@ -287,7 +290,9 @@ func TestTSOFollowerProxy(t *testing.T) { endpoints := runServer(re, cluster) cli1 := setupCli(re, ctx, endpoints) + defer cli1.Close() 
cli2 := setupCli(re, ctx, endpoints) + defer cli2.Close() cli2.UpdateOption(pd.EnableTSOFollowerProxy, true) var wg sync.WaitGroup @@ -325,6 +330,7 @@ func TestUnavailableTimeAfterLeaderIsReady(t *testing.T) { endpoints := runServer(re, cluster) cli := setupCli(re, ctx, endpoints) + defer cli.Close() var wg sync.WaitGroup var maxUnavailableTime, leaderReadyTime time.Time @@ -397,6 +403,7 @@ func TestGlobalAndLocalTSO(t *testing.T) { endpoints := runServer(re, cluster) cli := setupCli(re, ctx, endpoints) + defer cli.Close() // Wait for all nodes becoming healthy. time.Sleep(time.Second * 5) @@ -508,6 +515,7 @@ func TestCustomTimeout(t *testing.T) { endpoints := runServer(re, cluster) cli := setupCli(re, ctx, endpoints, pd.WithCustomTimeoutOption(time.Second)) + defer cli.Close() start := time.Now() re.NoError(failpoint.Enable("github.com/tikv/pd/server/customTimeout", "return(true)")) @@ -581,6 +589,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionByFollowerForwardin defer cancel() cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork1", "return(true)")) time.Sleep(200 * time.Millisecond) r, err := cli.GetRegion(context.Background(), []byte("a")) @@ -600,6 +609,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoByFollowerForwarding1( ctx, cancel := context.WithCancel(suite.ctx) defer cancel() cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork", "return(true)")) var lastTS uint64 @@ -634,6 +644,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoByFollowerForwarding2( ctx, cancel := context.WithCancel(suite.ctx) defer cancel() cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + defer cli.Close() re.NoError(failpoint.Enable("github.com/tikv/pd/client/unreachableNetwork", 
"return(true)")) var lastTS uint64 @@ -670,6 +681,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetTsoAndRegionByFollowerFor re.NoError(failpoint.Enable("github.com/tikv/pd/client/grpcutil/unreachableNetwork2", fmt.Sprintf("return(\"%s\")", follower.GetAddr()))) cli := setupCli(re, ctx, suite.endpoints, pd.WithForwardingOption(true)) + defer cli.Close() var lastTS uint64 testutil.Eventually(re, func() bool { physical, logical, err := cli.GetTS(context.TODO()) @@ -732,6 +744,7 @@ func (suite *followerForwardAndHandleTestSuite) TestGetRegionFromFollower() { cluster := suite.cluster cli := setupCli(re, ctx, suite.endpoints) + defer cli.Close() cli.UpdateOption(pd.EnableFollowerHandle, true) re.NotEmpty(cluster.WaitLeader()) leader := cluster.GetLeaderServer() diff --git a/tests/integrations/client/client_tls_test.go b/tests/integrations/client/client_tls_test.go index b46895f4f8c..bdfe050bf45 100644 --- a/tests/integrations/client/client_tls_test.go +++ b/tests/integrations/client/client_tls_test.go @@ -178,8 +178,8 @@ func testTLSReload( dcancel() return } - dcancel() cli.Close() + dcancel() } }() @@ -212,12 +212,13 @@ func testTLSReload( caData, certData, keyData := loadTLSContent(re, testClientTLSInfo.TrustedCAFile, testClientTLSInfo.CertFile, testClientTLSInfo.KeyFile) ctx1, cancel1 := context.WithTimeout(ctx, 2*time.Second) - _, err = pd.NewClientWithContext(ctx1, endpoints, pd.SecurityOption{ + cli, err = pd.NewClientWithContext(ctx1, endpoints, pd.SecurityOption{ SSLCABytes: caData, SSLCertBytes: certData, SSLKEYBytes: keyData, }, pd.WithGRPCDialOptions(grpc.WithBlock())) re.NoError(err) + defer cli.Close() cancel1() } diff --git a/tests/integrations/client/gc_client_test.go b/tests/integrations/client/gc_client_test.go index a2c3c3263f7..737fd09a08f 100644 --- a/tests/integrations/client/gc_client_test.go +++ b/tests/integrations/client/gc_client_test.go @@ -89,6 +89,7 @@ func (suite *gcClientTestSuite) TearDownSuite() { re := suite.Require() 
re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/gc/checkKeyspace")) suite.cleanup() + suite.client.Close() } func (suite *gcClientTestSuite) TearDownTest() { diff --git a/tests/integrations/client/global_config_test.go b/tests/integrations/client/global_config_test.go index 349b16579bd..aeb704c3305 100644 --- a/tests/integrations/client/global_config_test.go +++ b/tests/integrations/client/global_config_test.go @@ -80,6 +80,7 @@ func (suite *globalConfigTestSuite) SetupSuite() { func (suite *globalConfigTestSuite) TearDownSuite() { suite.client.Close() suite.cleanup() + suite.client.Close() } func (suite *globalConfigTestSuite) GetEtcdPath(configPath string) string { diff --git a/tests/integrations/client/http_client_test.go b/tests/integrations/client/http_client_test.go index 5cfd8fc25f2..4961fb9b90a 100644 --- a/tests/integrations/client/http_client_test.go +++ b/tests/integrations/client/http_client_test.go @@ -16,19 +16,19 @@ package client_test import ( "context" - "errors" "math" "net/http" + "net/url" "sort" "strings" "testing" "time" + "github.com/pingcap/errors" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - pdCli "github.com/tikv/pd/client" pd "github.com/tikv/pd/client/http" "github.com/tikv/pd/client/retry" "github.com/tikv/pd/pkg/core" @@ -41,13 +41,27 @@ import ( "github.com/tikv/pd/tests" ) +type mode int + +// We have two ways to create HTTP client. +// 1. using `NewClient` which created `DefaultPDServiceDiscovery` +// 2. using `NewClientWithServiceDiscovery` which pass a `PDServiceDiscovery` as parameter +// test cases should be run in both modes. 
+const ( + defaultServiceDiscovery mode = iota + specificServiceDiscovery +) + type httpClientTestSuite struct { suite.Suite + env map[mode]*httpClientTestEnv +} + +type httpClientTestEnv struct { ctx context.Context cancelFunc context.CancelFunc cluster *tests.TestCluster - client pd.Client - sd pdCli.ServiceDiscovery + endpoints []string } func TestHTTPClientTestSuite(t *testing.T) { @@ -55,116 +69,153 @@ func TestHTTPClientTestSuite(t *testing.T) { } func (suite *httpClientTestSuite) SetupSuite() { + suite.env = make(map[mode]*httpClientTestEnv) re := suite.Require() - var err error - suite.ctx, suite.cancelFunc = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestCluster(suite.ctx, 2) - re.NoError(err) - err = suite.cluster.RunInitialServers() - re.NoError(err) - leader := suite.cluster.WaitLeader() - re.NotEmpty(leader) - leaderServer := suite.cluster.GetLeaderServer() - err = leaderServer.BootstrapCluster() - re.NoError(err) - for _, region := range []*core.RegionInfo{ - core.NewTestRegionInfo(10, 1, []byte("a1"), []byte("a2")), - core.NewTestRegionInfo(11, 1, []byte("a2"), []byte("a3")), - } { - err := leaderServer.GetRaftCluster().HandleRegionHeartbeat(region) + + for _, mode := range []mode{defaultServiceDiscovery, specificServiceDiscovery} { + env := &httpClientTestEnv{} + env.ctx, env.cancelFunc = context.WithCancel(context.Background()) + + cluster, err := tests.NewTestCluster(env.ctx, 2) re.NoError(err) + + err = cluster.RunInitialServers() + re.NoError(err) + leader := cluster.WaitLeader() + re.NotEmpty(leader) + leaderServer := cluster.GetLeaderServer() + err = leaderServer.BootstrapCluster() + re.NoError(err) + for _, region := range []*core.RegionInfo{ + core.NewTestRegionInfo(10, 1, []byte("a1"), []byte("a2")), + core.NewTestRegionInfo(11, 1, []byte("a2"), []byte("a3")), + } { + err := leaderServer.GetRaftCluster().HandleRegionHeartbeat(region) + re.NoError(err) + } + var ( + testServers = cluster.GetServers() + endpoints 
= make([]string, 0, len(testServers)) + ) + for _, s := range testServers { + addr := s.GetConfig().AdvertiseClientUrls + url, err := url.Parse(addr) + re.NoError(err) + endpoints = append(endpoints, url.Host) + } + env.endpoints = endpoints + env.cluster = cluster + + suite.env[mode] = env } - var ( - testServers = suite.cluster.GetServers() - endpoints = make([]string, 0, len(testServers)) - ) - for _, s := range testServers { - endpoints = append(endpoints, s.GetConfig().AdvertiseClientUrls) - } - cli := setupCli(re, suite.ctx, endpoints) - suite.sd = cli.GetServiceDiscovery() - suite.client = pd.NewClientWithServiceDiscovery("pd-http-client-it", suite.sd) } func (suite *httpClientTestSuite) TearDownSuite() { - suite.cancelFunc() - suite.client.Close() - suite.cluster.Destroy() + for _, env := range suite.env { + env.cancelFunc() + env.cluster.Destroy() + } +} + +// RunTestInTwoModes is to run test in two modes. +func (suite *httpClientTestSuite) RunTestInTwoModes(test func(mode mode, client pd.Client)) { + // Run test with specific service discovery. + cli := setupCli(suite.Require(), suite.env[specificServiceDiscovery].ctx, suite.env[specificServiceDiscovery].endpoints) + sd := cli.GetServiceDiscovery() + client := pd.NewClientWithServiceDiscovery("pd-http-client-it-grpc", sd) + test(specificServiceDiscovery, client) + client.Close() + + // Run test with default service discovery. 
+ client = pd.NewClient("pd-http-client-it-http", suite.env[defaultServiceDiscovery].endpoints) + test(defaultServiceDiscovery, client) + client.Close() } func (suite *httpClientTestSuite) TestMeta() { + suite.RunTestInTwoModes(suite.checkMeta) +} + +func (suite *httpClientTestSuite) checkMeta(mode mode, client pd.Client) { re := suite.Require() - replicateConfig, err := suite.client.GetReplicateConfig(suite.ctx) + env := suite.env[mode] + replicateConfig, err := client.GetReplicateConfig(env.ctx) re.NoError(err) re.Equal(3.0, replicateConfig["max-replicas"]) - region, err := suite.client.GetRegionByID(suite.ctx, 10) + region, err := client.GetRegionByID(env.ctx, 10) re.NoError(err) re.Equal(int64(10), region.ID) re.Equal(core.HexRegionKeyStr([]byte("a1")), region.StartKey) re.Equal(core.HexRegionKeyStr([]byte("a2")), region.EndKey) - region, err = suite.client.GetRegionByKey(suite.ctx, []byte("a2")) + region, err = client.GetRegionByKey(env.ctx, []byte("a2")) re.NoError(err) re.Equal(int64(11), region.ID) re.Equal(core.HexRegionKeyStr([]byte("a2")), region.StartKey) re.Equal(core.HexRegionKeyStr([]byte("a3")), region.EndKey) - regions, err := suite.client.GetRegions(suite.ctx) + regions, err := client.GetRegions(env.ctx) re.NoError(err) re.Equal(int64(2), regions.Count) re.Len(regions.Regions, 2) - regions, err = suite.client.GetRegionsByKeyRange(suite.ctx, pd.NewKeyRange([]byte("a1"), []byte("a3")), -1) + regions, err = client.GetRegionsByKeyRange(env.ctx, pd.NewKeyRange([]byte("a1"), []byte("a3")), -1) re.NoError(err) re.Equal(int64(2), regions.Count) re.Len(regions.Regions, 2) - regions, err = suite.client.GetRegionsByStoreID(suite.ctx, 1) + regions, err = client.GetRegionsByStoreID(env.ctx, 1) re.NoError(err) re.Equal(int64(2), regions.Count) re.Len(regions.Regions, 2) - regions, err = suite.client.GetEmptyRegions(suite.ctx) + regions, err = client.GetEmptyRegions(env.ctx) re.NoError(err) re.Equal(int64(2), regions.Count) re.Len(regions.Regions, 2) - state, 
err := suite.client.GetRegionsReplicatedStateByKeyRange(suite.ctx, pd.NewKeyRange([]byte("a1"), []byte("a3"))) + state, err := client.GetRegionsReplicatedStateByKeyRange(env.ctx, pd.NewKeyRange([]byte("a1"), []byte("a3"))) re.NoError(err) re.Equal("INPROGRESS", state) - regionStats, err := suite.client.GetRegionStatusByKeyRange(suite.ctx, pd.NewKeyRange([]byte("a1"), []byte("a3")), false) + regionStats, err := client.GetRegionStatusByKeyRange(env.ctx, pd.NewKeyRange([]byte("a1"), []byte("a3")), false) re.NoError(err) re.Greater(regionStats.Count, 0) re.NotEmpty(regionStats.StoreLeaderCount) - regionStats, err = suite.client.GetRegionStatusByKeyRange(suite.ctx, pd.NewKeyRange([]byte("a1"), []byte("a3")), true) + regionStats, err = client.GetRegionStatusByKeyRange(env.ctx, pd.NewKeyRange([]byte("a1"), []byte("a3")), true) re.NoError(err) re.Greater(regionStats.Count, 0) re.Empty(regionStats.StoreLeaderCount) - hotReadRegions, err := suite.client.GetHotReadRegions(suite.ctx) + hotReadRegions, err := client.GetHotReadRegions(env.ctx) re.NoError(err) re.Len(hotReadRegions.AsPeer, 1) re.Len(hotReadRegions.AsLeader, 1) - hotWriteRegions, err := suite.client.GetHotWriteRegions(suite.ctx) + hotWriteRegions, err := client.GetHotWriteRegions(env.ctx) re.NoError(err) re.Len(hotWriteRegions.AsPeer, 1) re.Len(hotWriteRegions.AsLeader, 1) - historyHorRegions, err := suite.client.GetHistoryHotRegions(suite.ctx, &pd.HistoryHotRegionsRequest{ + historyHorRegions, err := client.GetHistoryHotRegions(env.ctx, &pd.HistoryHotRegionsRequest{ StartTime: 0, EndTime: time.Now().AddDate(0, 0, 1).UnixNano() / int64(time.Millisecond), }) re.NoError(err) re.Empty(historyHorRegions.HistoryHotRegion) - store, err := suite.client.GetStores(suite.ctx) + store, err := client.GetStores(env.ctx) re.NoError(err) re.Equal(1, store.Count) re.Len(store.Stores, 1) storeID := uint64(store.Stores[0].Store.ID) // TODO: why type is different? 
- store2, err := suite.client.GetStore(suite.ctx, storeID) + store2, err := client.GetStore(env.ctx, storeID) re.NoError(err) re.EqualValues(storeID, store2.Store.ID) - version, err := suite.client.GetClusterVersion(suite.ctx) + version, err := client.GetClusterVersion(env.ctx) re.NoError(err) re.Equal("0.0.0", version) } func (suite *httpClientTestSuite) TestGetMinResolvedTSByStoresIDs() { + suite.RunTestInTwoModes(suite.checkGetMinResolvedTSByStoresIDs) +} + +func (suite *httpClientTestSuite) checkGetMinResolvedTSByStoresIDs(mode mode, client pd.Client) { re := suite.Require() + env := suite.env[mode] + testMinResolvedTS := tsoutil.TimeToTS(time.Now()) - raftCluster := suite.cluster.GetLeaderServer().GetRaftCluster() + raftCluster := env.cluster.GetLeaderServer().GetRaftCluster() err := raftCluster.SetMinResolvedTS(1, testMinResolvedTS) re.NoError(err) // Make sure the min resolved TS is updated. @@ -173,18 +224,18 @@ func (suite *httpClientTestSuite) TestGetMinResolvedTSByStoresIDs() { return minResolvedTS == testMinResolvedTS }) // Wait for the cluster-level min resolved TS to be initialized. - minResolvedTS, storeMinResolvedTSMap, err := suite.client.GetMinResolvedTSByStoresIDs(suite.ctx, nil) + minResolvedTS, storeMinResolvedTSMap, err := client.GetMinResolvedTSByStoresIDs(env.ctx, nil) re.NoError(err) re.Equal(testMinResolvedTS, minResolvedTS) re.Empty(storeMinResolvedTSMap) // Get the store-level min resolved TS. - minResolvedTS, storeMinResolvedTSMap, err = suite.client.GetMinResolvedTSByStoresIDs(suite.ctx, []uint64{1}) + minResolvedTS, storeMinResolvedTSMap, err = client.GetMinResolvedTSByStoresIDs(env.ctx, []uint64{1}) re.NoError(err) re.Equal(testMinResolvedTS, minResolvedTS) re.Len(storeMinResolvedTSMap, 1) re.Equal(minResolvedTS, storeMinResolvedTSMap[1]) // Get the store-level min resolved TS with an invalid store ID. 
- minResolvedTS, storeMinResolvedTSMap, err = suite.client.GetMinResolvedTSByStoresIDs(suite.ctx, []uint64{1, 2}) + minResolvedTS, storeMinResolvedTSMap, err = client.GetMinResolvedTSByStoresIDs(env.ctx, []uint64{1, 2}) re.NoError(err) re.Equal(testMinResolvedTS, minResolvedTS) re.Len(storeMinResolvedTSMap, 2) @@ -193,16 +244,22 @@ func (suite *httpClientTestSuite) TestGetMinResolvedTSByStoresIDs() { } func (suite *httpClientTestSuite) TestRule() { + suite.RunTestInTwoModes(suite.checkRule) +} + +func (suite *httpClientTestSuite) checkRule(mode mode, client pd.Client) { re := suite.Require() - bundles, err := suite.client.GetAllPlacementRuleBundles(suite.ctx) + env := suite.env[mode] + + bundles, err := client.GetAllPlacementRuleBundles(env.ctx) re.NoError(err) re.Len(bundles, 1) re.Equal(placement.DefaultGroupID, bundles[0].ID) - bundle, err := suite.client.GetPlacementRuleBundleByGroup(suite.ctx, placement.DefaultGroupID) + bundle, err := client.GetPlacementRuleBundleByGroup(env.ctx, placement.DefaultGroupID) re.NoError(err) re.Equal(bundles[0], bundle) // Check if we have the default rule. - suite.checkRule(re, &pd.Rule{ + suite.checkRuleResult(re, env, client, &pd.Rule{ GroupID: placement.DefaultGroupID, ID: placement.DefaultRuleID, Role: pd.Voter, @@ -211,7 +268,7 @@ func (suite *httpClientTestSuite) TestRule() { EndKey: []byte{}, }, 1, true) // Should be the same as the rules in the bundle. 
- suite.checkRule(re, bundle.Rules[0], 1, true) + suite.checkRuleResult(re, env, client, bundle.Rules[0], 1, true) testRule := &pd.Rule{ GroupID: placement.DefaultGroupID, ID: "test", @@ -220,39 +277,39 @@ func (suite *httpClientTestSuite) TestRule() { StartKey: []byte{}, EndKey: []byte{}, } - err = suite.client.SetPlacementRule(suite.ctx, testRule) + err = client.SetPlacementRule(env.ctx, testRule) re.NoError(err) - suite.checkRule(re, testRule, 2, true) - err = suite.client.DeletePlacementRule(suite.ctx, placement.DefaultGroupID, "test") + suite.checkRuleResult(re, env, client, testRule, 2, true) + err = client.DeletePlacementRule(env.ctx, placement.DefaultGroupID, "test") re.NoError(err) - suite.checkRule(re, testRule, 1, false) + suite.checkRuleResult(re, env, client, testRule, 1, false) testRuleOp := &pd.RuleOp{ Rule: testRule, Action: pd.RuleOpAdd, } - err = suite.client.SetPlacementRuleInBatch(suite.ctx, []*pd.RuleOp{testRuleOp}) + err = client.SetPlacementRuleInBatch(env.ctx, []*pd.RuleOp{testRuleOp}) re.NoError(err) - suite.checkRule(re, testRule, 2, true) + suite.checkRuleResult(re, env, client, testRule, 2, true) testRuleOp = &pd.RuleOp{ Rule: testRule, Action: pd.RuleOpDel, } - err = suite.client.SetPlacementRuleInBatch(suite.ctx, []*pd.RuleOp{testRuleOp}) + err = client.SetPlacementRuleInBatch(env.ctx, []*pd.RuleOp{testRuleOp}) re.NoError(err) - suite.checkRule(re, testRule, 1, false) - err = suite.client.SetPlacementRuleBundles(suite.ctx, []*pd.GroupBundle{ + suite.checkRuleResult(re, env, client, testRule, 1, false) + err = client.SetPlacementRuleBundles(env.ctx, []*pd.GroupBundle{ { ID: placement.DefaultGroupID, Rules: []*pd.Rule{testRule}, }, }, true) re.NoError(err) - suite.checkRule(re, testRule, 1, true) - ruleGroups, err := suite.client.GetAllPlacementRuleGroups(suite.ctx) + suite.checkRuleResult(re, env, client, testRule, 1, true) + ruleGroups, err := client.GetAllPlacementRuleGroups(env.ctx) re.NoError(err) re.Len(ruleGroups, 1) 
re.Equal(placement.DefaultGroupID, ruleGroups[0].ID) - ruleGroup, err := suite.client.GetPlacementRuleGroupByID(suite.ctx, placement.DefaultGroupID) + ruleGroup, err := client.GetPlacementRuleGroupByID(env.ctx, placement.DefaultGroupID) re.NoError(err) re.Equal(ruleGroups[0], ruleGroup) testRuleGroup := &pd.RuleGroup{ @@ -260,14 +317,14 @@ func (suite *httpClientTestSuite) TestRule() { Index: 1, Override: true, } - err = suite.client.SetPlacementRuleGroup(suite.ctx, testRuleGroup) + err = client.SetPlacementRuleGroup(env.ctx, testRuleGroup) re.NoError(err) - ruleGroup, err = suite.client.GetPlacementRuleGroupByID(suite.ctx, testRuleGroup.ID) + ruleGroup, err = client.GetPlacementRuleGroupByID(env.ctx, testRuleGroup.ID) re.NoError(err) re.Equal(testRuleGroup, ruleGroup) - err = suite.client.DeletePlacementRuleGroupByID(suite.ctx, testRuleGroup.ID) + err = client.DeletePlacementRuleGroupByID(env.ctx, testRuleGroup.ID) re.NoError(err) - ruleGroup, err = suite.client.GetPlacementRuleGroupByID(suite.ctx, testRuleGroup.ID) + ruleGroup, err = client.GetPlacementRuleGroupByID(env.ctx, testRuleGroup.ID) re.ErrorContains(err, http.StatusText(http.StatusNotFound)) re.Empty(ruleGroup) // Test the start key and end key. 
@@ -279,32 +336,34 @@ func (suite *httpClientTestSuite) TestRule() { StartKey: []byte("a1"), EndKey: []byte(""), } - err = suite.client.SetPlacementRule(suite.ctx, testRule) + err = client.SetPlacementRule(env.ctx, testRule) re.NoError(err) - suite.checkRule(re, testRule, 1, true) + suite.checkRuleResult(re, env, client, testRule, 1, true) } -func (suite *httpClientTestSuite) checkRule( +func (suite *httpClientTestSuite) checkRuleResult( re *require.Assertions, + env *httpClientTestEnv, + client pd.Client, rule *pd.Rule, totalRuleCount int, exist bool, ) { if exist { - got, err := suite.client.GetPlacementRule(suite.ctx, rule.GroupID, rule.ID) + got, err := client.GetPlacementRule(env.ctx, rule.GroupID, rule.ID) re.NoError(err) // skip comparison of the generated field got.StartKeyHex = rule.StartKeyHex got.EndKeyHex = rule.EndKeyHex re.Equal(rule, got) } else { - _, err := suite.client.GetPlacementRule(suite.ctx, rule.GroupID, rule.ID) + _, err := client.GetPlacementRule(env.ctx, rule.GroupID, rule.ID) re.ErrorContains(err, http.StatusText(http.StatusNotFound)) } // Check through the `GetPlacementRulesByGroup` API. - rules, err := suite.client.GetPlacementRulesByGroup(suite.ctx, rule.GroupID) + rules, err := client.GetPlacementRulesByGroup(env.ctx, rule.GroupID) re.NoError(err) checkRuleFunc(re, rules, rule, totalRuleCount, exist) // Check through the `GetPlacementRuleBundleByGroup` API. 
- bundle, err := suite.client.GetPlacementRuleBundleByGroup(suite.ctx, rule.GroupID) + bundle, err := client.GetPlacementRuleBundleByGroup(env.ctx, rule.GroupID) re.NoError(err) checkRuleFunc(re, bundle.Rules, rule, totalRuleCount, exist) } @@ -332,8 +391,14 @@ func checkRuleFunc( } func (suite *httpClientTestSuite) TestRegionLabel() { + suite.RunTestInTwoModes(suite.checkRegionLabel) +} + +func (suite *httpClientTestSuite) checkRegionLabel(mode mode, client pd.Client) { re := suite.Require() - labelRules, err := suite.client.GetAllRegionLabelRules(suite.ctx) + env := suite.env[mode] + + labelRules, err := client.GetAllRegionLabelRules(env.ctx) re.NoError(err) re.Len(labelRules, 1) re.Equal("keyspaces/0", labelRules[0].ID) @@ -344,9 +409,9 @@ func (suite *httpClientTestSuite) TestRegionLabel() { RuleType: "key-range", Data: labeler.MakeKeyRanges("1234", "5678"), } - err = suite.client.SetRegionLabelRule(suite.ctx, labelRule) + err = client.SetRegionLabelRule(env.ctx, labelRule) re.NoError(err) - labelRules, err = suite.client.GetAllRegionLabelRules(suite.ctx) + labelRules, err = client.GetAllRegionLabelRules(env.ctx) re.NoError(err) re.Len(labelRules, 2) sort.Slice(labelRules, func(i, j int) bool { @@ -366,9 +431,9 @@ func (suite *httpClientTestSuite) TestRegionLabel() { SetRules: []*pd.LabelRule{labelRule}, DeleteRules: []string{"rule1"}, } - err = suite.client.PatchRegionLabelRules(suite.ctx, patch) + err = client.PatchRegionLabelRules(env.ctx, patch) re.NoError(err) - allLabelRules, err := suite.client.GetAllRegionLabelRules(suite.ctx) + allLabelRules, err := client.GetAllRegionLabelRules(env.ctx) re.NoError(err) re.Len(labelRules, 2) sort.Slice(allLabelRules, func(i, j int) bool { @@ -377,7 +442,7 @@ func (suite *httpClientTestSuite) TestRegionLabel() { re.Equal(labelRule.ID, allLabelRules[1].ID) re.Equal(labelRule.Labels, allLabelRules[1].Labels) re.Equal(labelRule.RuleType, allLabelRules[1].RuleType) - labelRules, err = 
suite.client.GetRegionLabelRulesByIDs(suite.ctx, []string{"keyspaces/0", "rule2"}) + labelRules, err = client.GetRegionLabelRulesByIDs(env.ctx, []string{"keyspaces/0", "rule2"}) re.NoError(err) sort.Slice(labelRules, func(i, j int) bool { return labelRules[i].ID < labelRules[j].ID @@ -386,18 +451,24 @@ func (suite *httpClientTestSuite) TestRegionLabel() { } func (suite *httpClientTestSuite) TestAccelerateSchedule() { + suite.RunTestInTwoModes(suite.checkAccelerateSchedule) +} + +func (suite *httpClientTestSuite) checkAccelerateSchedule(mode mode, client pd.Client) { re := suite.Require() - raftCluster := suite.cluster.GetLeaderServer().GetRaftCluster() + env := suite.env[mode] + + raftCluster := env.cluster.GetLeaderServer().GetRaftCluster() suspectRegions := raftCluster.GetSuspectRegions() re.Empty(suspectRegions) - err := suite.client.AccelerateSchedule(suite.ctx, pd.NewKeyRange([]byte("a1"), []byte("a2"))) + err := client.AccelerateSchedule(env.ctx, pd.NewKeyRange([]byte("a1"), []byte("a2"))) re.NoError(err) suspectRegions = raftCluster.GetSuspectRegions() re.Len(suspectRegions, 1) raftCluster.ClearSuspectRegions() suspectRegions = raftCluster.GetSuspectRegions() re.Empty(suspectRegions) - err = suite.client.AccelerateScheduleInBatch(suite.ctx, []*pd.KeyRange{ + err = client.AccelerateScheduleInBatch(env.ctx, []*pd.KeyRange{ pd.NewKeyRange([]byte("a1"), []byte("a2")), pd.NewKeyRange([]byte("a2"), []byte("a3")), }) @@ -407,18 +478,24 @@ func (suite *httpClientTestSuite) TestAccelerateSchedule() { } func (suite *httpClientTestSuite) TestConfig() { + suite.RunTestInTwoModes(suite.checkConfig) +} + +func (suite *httpClientTestSuite) checkConfig(mode mode, client pd.Client) { re := suite.Require() - config, err := suite.client.GetConfig(suite.ctx) + env := suite.env[mode] + + config, err := client.GetConfig(env.ctx) re.NoError(err) re.Equal(float64(4), config["schedule"].(map[string]interface{})["leader-schedule-limit"]) newConfig := map[string]interface{}{ 
"schedule.leader-schedule-limit": float64(8), } - err = suite.client.SetConfig(suite.ctx, newConfig) + err = client.SetConfig(env.ctx, newConfig) re.NoError(err) - config, err = suite.client.GetConfig(suite.ctx) + config, err = client.GetConfig(env.ctx) re.NoError(err) re.Equal(float64(8), config["schedule"].(map[string]interface{})["leader-schedule-limit"]) @@ -426,58 +503,82 @@ func (suite *httpClientTestSuite) TestConfig() { newConfig = map[string]interface{}{ "schedule.leader-schedule-limit": float64(16), } - err = suite.client.SetConfig(suite.ctx, newConfig, 5) + err = client.SetConfig(env.ctx, newConfig, 5) re.NoError(err) - resp, err := suite.cluster.GetEtcdClient().Get(suite.ctx, sc.TTLConfigPrefix+"/schedule.leader-schedule-limit") + resp, err := env.cluster.GetEtcdClient().Get(env.ctx, sc.TTLConfigPrefix+"/schedule.leader-schedule-limit") re.NoError(err) re.Equal([]byte("16"), resp.Kvs[0].Value) + // delete the config with TTL. + err = client.SetConfig(env.ctx, newConfig, 0) + re.NoError(err) + resp, err = env.cluster.GetEtcdClient().Get(env.ctx, sc.TTLConfigPrefix+"/schedule.leader-schedule-limit") + re.NoError(err) + re.Empty(resp.Kvs) } func (suite *httpClientTestSuite) TestScheduleConfig() { + suite.RunTestInTwoModes(suite.checkScheduleConfig) +} + +func (suite *httpClientTestSuite) checkScheduleConfig(mode mode, client pd.Client) { re := suite.Require() - config, err := suite.client.GetScheduleConfig(suite.ctx) + env := suite.env[mode] + + config, err := client.GetScheduleConfig(env.ctx) re.NoError(err) - re.Equal(float64(4), config["leader-schedule-limit"]) + re.Equal(float64(4), config["hot-region-schedule-limit"]) re.Equal(float64(2048), config["region-schedule-limit"]) - config["leader-schedule-limit"] = float64(8) - err = suite.client.SetScheduleConfig(suite.ctx, config) + config["hot-region-schedule-limit"] = float64(8) + err = client.SetScheduleConfig(env.ctx, config) re.NoError(err) - config, err = suite.client.GetScheduleConfig(suite.ctx) + 
config, err = client.GetScheduleConfig(env.ctx) re.NoError(err) - re.Equal(float64(8), config["leader-schedule-limit"]) + re.Equal(float64(8), config["hot-region-schedule-limit"]) re.Equal(float64(2048), config["region-schedule-limit"]) } func (suite *httpClientTestSuite) TestSchedulers() { + suite.RunTestInTwoModes(suite.checkSchedulers) +} + +func (suite *httpClientTestSuite) checkSchedulers(mode mode, client pd.Client) { re := suite.Require() - schedulers, err := suite.client.GetSchedulers(suite.ctx) + env := suite.env[mode] + + schedulers, err := client.GetSchedulers(env.ctx) re.NoError(err) re.Empty(schedulers) - err = suite.client.CreateScheduler(suite.ctx, "evict-leader-scheduler", 1) + err = client.CreateScheduler(env.ctx, "evict-leader-scheduler", 1) re.NoError(err) - schedulers, err = suite.client.GetSchedulers(suite.ctx) + schedulers, err = client.GetSchedulers(env.ctx) re.NoError(err) re.Len(schedulers, 1) - err = suite.client.SetSchedulerDelay(suite.ctx, "evict-leader-scheduler", 100) + err = client.SetSchedulerDelay(env.ctx, "evict-leader-scheduler", 100) re.NoError(err) - err = suite.client.SetSchedulerDelay(suite.ctx, "not-exist", 100) + err = client.SetSchedulerDelay(env.ctx, "not-exist", 100) re.ErrorContains(err, "500 Internal Server Error") // TODO: should return friendly error message } func (suite *httpClientTestSuite) TestSetStoreLabels() { + suite.RunTestInTwoModes(suite.checkSetStoreLabels) +} + +func (suite *httpClientTestSuite) checkSetStoreLabels(mode mode, client pd.Client) { re := suite.Require() - resp, err := suite.client.GetStores(suite.ctx) + env := suite.env[mode] + + resp, err := client.GetStores(env.ctx) re.NoError(err) setStore := resp.Stores[0] re.Empty(setStore.Store.Labels, nil) storeLabels := map[string]string{ "zone": "zone1", } - err = suite.client.SetStoreLabels(suite.ctx, 1, storeLabels) + err = client.SetStoreLabels(env.ctx, 1, storeLabels) re.NoError(err) - resp, err = suite.client.GetStores(suite.ctx) + resp, err = 
client.GetStores(env.ctx) re.NoError(err) for _, store := range resp.Stores { if store.Store.ID == setStore.Store.ID { @@ -489,74 +590,104 @@ func (suite *httpClientTestSuite) TestSetStoreLabels() { } func (suite *httpClientTestSuite) TestTransferLeader() { + suite.RunTestInTwoModes(suite.checkTransferLeader) +} + +func (suite *httpClientTestSuite) checkTransferLeader(mode mode, client pd.Client) { re := suite.Require() - members, err := suite.client.GetMembers(suite.ctx) + env := suite.env[mode] + + members, err := client.GetMembers(env.ctx) re.NoError(err) re.Len(members.Members, 2) - leader, err := suite.client.GetLeader(suite.ctx) + leader, err := client.GetLeader(env.ctx) re.NoError(err) // Transfer leader to another pd for _, member := range members.Members { if member.GetName() != leader.GetName() { - err = suite.client.TransferLeader(suite.ctx, member.GetName()) + err = client.TransferLeader(env.ctx, member.GetName()) re.NoError(err) break } } - newLeader := suite.cluster.WaitLeader() + newLeader := env.cluster.WaitLeader() re.NotEmpty(newLeader) re.NoError(err) re.NotEqual(leader.GetName(), newLeader) // Force to update the members info. 
testutil.Eventually(re, func() bool { - leader, err = suite.client.GetLeader(suite.ctx) + leader, err = client.GetLeader(env.ctx) re.NoError(err) return newLeader == leader.GetName() }) - members, err = suite.client.GetMembers(suite.ctx) + members, err = client.GetMembers(env.ctx) re.NoError(err) re.Len(members.Members, 2) re.Equal(leader.GetName(), members.Leader.GetName()) } func (suite *httpClientTestSuite) TestVersion() { + suite.RunTestInTwoModes(suite.checkVersion) +} + +func (suite *httpClientTestSuite) checkVersion(mode mode, client pd.Client) { re := suite.Require() - ver, err := suite.client.GetPDVersion(suite.ctx) + env := suite.env[mode] + + ver, err := client.GetPDVersion(env.ctx) re.NoError(err) re.Equal(versioninfo.PDReleaseVersion, ver) } func (suite *httpClientTestSuite) TestAdmin() { + suite.RunTestInTwoModes(suite.checkAdmin) +} + +func (suite *httpClientTestSuite) checkAdmin(mode mode, client pd.Client) { re := suite.Require() - err := suite.client.SetSnapshotRecoveringMark(suite.ctx) + env := suite.env[mode] + + err := client.SetSnapshotRecoveringMark(env.ctx) re.NoError(err) - err = suite.client.ResetTS(suite.ctx, 123, true) + err = client.ResetTS(env.ctx, 123, true) re.NoError(err) - err = suite.client.ResetBaseAllocID(suite.ctx, 456) + err = client.ResetBaseAllocID(env.ctx, 456) re.NoError(err) - err = suite.client.DeleteSnapshotRecoveringMark(suite.ctx) + err = client.DeleteSnapshotRecoveringMark(env.ctx) re.NoError(err) } func (suite *httpClientTestSuite) TestWithBackoffer() { + suite.RunTestInTwoModes(suite.checkWithBackoffer) +} + +func (suite *httpClientTestSuite) checkWithBackoffer(mode mode, client pd.Client) { re := suite.Require() + env := suite.env[mode] + // Should return with 404 error without backoffer. 
- rule, err := suite.client.GetPlacementRule(suite.ctx, "non-exist-group", "non-exist-rule") + rule, err := client.GetPlacementRule(env.ctx, "non-exist-group", "non-exist-rule") re.ErrorContains(err, http.StatusText(http.StatusNotFound)) re.Nil(rule) // Should return with 404 error even with an infinite backoffer. - rule, err = suite.client. + rule, err = client. WithBackoffer(retry.InitialBackoffer(100*time.Millisecond, time.Second, 0)). - GetPlacementRule(suite.ctx, "non-exist-group", "non-exist-rule") + GetPlacementRule(env.ctx, "non-exist-group", "non-exist-rule") re.ErrorContains(err, http.StatusText(http.StatusNotFound)) re.Nil(rule) } func (suite *httpClientTestSuite) TestRedirectWithMetrics() { re := suite.Require() + env := suite.env[defaultServiceDiscovery] + + cli := setupCli(suite.Require(), env.ctx, env.endpoints) + defer cli.Close() + sd := cli.GetServiceDiscovery() + metricCnt := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "check", @@ -568,7 +699,7 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { } return nil }) - c := pd.NewClientWithServiceDiscovery("pd-http-client-it", suite.sd, pd.WithHTTPClient(httpClient), pd.WithMetrics(metricCnt, nil)) + c := pd.NewClientWithServiceDiscovery("pd-http-client-it", sd, pd.WithHTTPClient(httpClient), pd.WithMetrics(metricCnt, nil)) c.CreateScheduler(context.Background(), "test", 0) var out dto.Metric failureCnt, err := metricCnt.GetMetricWithLabelValues([]string{"CreateScheduler", "network error"}...) @@ -577,7 +708,7 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { re.Equal(float64(2), out.Counter.GetValue()) c.Close() - leader := suite.sd.GetServingAddr() + leader := sd.GetServingAddr() httpClient = pd.NewHTTPClientWithRequestChecker(func(req *http.Request) error { // mock leader success. 
if !strings.Contains(leader, req.Host) { @@ -585,7 +716,7 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { } return nil }) - c = pd.NewClientWithServiceDiscovery("pd-http-client-it", suite.sd, pd.WithHTTPClient(httpClient), pd.WithMetrics(metricCnt, nil)) + c = pd.NewClientWithServiceDiscovery("pd-http-client-it", sd, pd.WithHTTPClient(httpClient), pd.WithMetrics(metricCnt, nil)) c.CreateScheduler(context.Background(), "test", 0) successCnt, err := metricCnt.GetMetricWithLabelValues([]string{"CreateScheduler", ""}...) re.NoError(err) @@ -600,7 +731,7 @@ func (suite *httpClientTestSuite) TestRedirectWithMetrics() { } return nil }) - c = pd.NewClientWithServiceDiscovery("pd-http-client-it", suite.sd, pd.WithHTTPClient(httpClient), pd.WithMetrics(metricCnt, nil)) + c = pd.NewClientWithServiceDiscovery("pd-http-client-it", sd, pd.WithHTTPClient(httpClient), pd.WithMetrics(metricCnt, nil)) c.CreateScheduler(context.Background(), "test", 0) successCnt, err = metricCnt.GetMetricWithLabelValues([]string{"CreateScheduler", ""}...) 
re.NoError(err) diff --git a/tests/integrations/go.mod b/tests/integrations/go.mod index 06db591af10..31d43cb86f6 100644 --- a/tests/integrations/go.mod +++ b/tests/integrations/go.mod @@ -15,11 +15,12 @@ require ( github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/docker/go-units v0.5.0 github.com/go-sql-driver/mysql v1.7.0 + github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c github.com/pingcap/kvproto v0.0.0-20231226064240-4f28b82c7860 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 - github.com/prometheus/client_golang v1.11.1 - github.com/prometheus/client_model v0.4.0 + github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/client_model v0.5.0 github.com/stretchr/testify v1.8.4 github.com/tikv/pd v0.0.0-00010101000000-000000000000 github.com/tikv/pd/client v0.0.0-00010101000000-000000000000 @@ -117,7 +118,6 @@ require ( github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-sqlite3 v1.14.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/minio/sio v0.3.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -127,15 +127,14 @@ require ( github.com/petermattis/goid v0.0.0-20211229010228-4d14c490ee36 // indirect github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d // indirect github.com/pingcap/errcode v0.3.0 // indirect - github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 // indirect github.com/pingcap/tidb-dashboard v0.0.0-20240111062855-41f7c8011953 // indirect github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat 
v0.0.0-20221212215047-62379fc7944b // indirect - github.com/prometheus/common v0.26.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/common v0.46.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/samber/lo v1.37.0 // indirect github.com/sasha-s/go-deadlock v0.2.0 // indirect @@ -169,13 +168,13 @@ require ( go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.17.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 // indirect golang.org/x/image v0.10.0 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.4.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.14.0 // indirect @@ -183,7 +182,7 @@ require ( google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/tests/integrations/go.sum b/tests/integrations/go.sum index df7da785b68..556932a2448 100644 --- a/tests/integrations/go.sum +++ b/tests/integrations/go.sum @@ -21,10 +21,7 @@ github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502 h1:L github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502/go.mod 
h1:pmnBM9bxWSiHvC/gSWunUIyDvGn33EkP2CUjxFKtTTM= github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= @@ -79,7 +76,6 @@ github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4ea github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -154,11 +150,8 @@ github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin 
v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -223,13 +216,6 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -240,10 +226,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -322,13 +305,10 @@ github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -341,8 +321,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -368,7 +349,6 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= @@ -454,34 +434,31 @@ github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:Om github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common 
v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= 
-github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -645,8 +622,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 h1:QLureRX3moex6NVu/Lr4MGakp9FdA7sBHGBmvRW7NaM= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= @@ -680,10 +657,8 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod 
h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -694,20 +669,18 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.4.0 
h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -723,20 +696,15 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -751,8 +719,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 
h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -822,17 +790,11 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -856,9 +818,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tests/integrations/mcs/scheduling/api_test.go b/tests/integrations/mcs/scheduling/api_test.go index 4c53f3fabb2..6d08380629b 100644 --- a/tests/integrations/mcs/scheduling/api_test.go +++ b/tests/integrations/mcs/scheduling/api_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/pingcap/failpoint" "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" _ "github.com/tikv/pd/pkg/mcs/scheduling/server/apis/v1" @@ -43,11 +44,18 @@ func TestAPI(t *testing.T) { } func (suite *apiTestSuite) SetupSuite() { + re := suite.Require() + 
re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker", `return(true)`)) + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/scheduling/server/changeRunCollectWaitTime", `return(true)`)) suite.env = tests.NewSchedulingTestEnvironment(suite.T()) } func (suite *apiTestSuite) TearDownSuite() { suite.env.Cleanup() + re := suite.Require() + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker")) + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/changeRunCollectWaitTime")) + testDialClient.CloseIdleConnections() } func (suite *apiTestSuite) TestGetCheckerByName() { @@ -190,12 +198,10 @@ func (suite *apiTestSuite) checkAPIForward(cluster *tests.TestCluster) { testutil.WithHeader(re, apiutil.XForwardedToMicroServiceHeader, "true")) re.NoError(err) re.Contains(resp, "balance-leader-scheduler") - re.Contains(resp, "balance-witness-scheduler") re.Contains(resp, "balance-hot-region-scheduler") schedulers := []string{ "balance-leader-scheduler", - "balance-witness-scheduler", "balance-hot-region-scheduler", } for _, schedulerName := range schedulers { @@ -397,13 +403,11 @@ func (suite *apiTestSuite) checkConfig(cluster *tests.TestCluster) { re.Equal(cfg.DataDir, s.GetConfig().DataDir) testutil.Eventually(re, func() bool { // wait for all schedulers to be loaded in scheduling server. 
- return len(cfg.Schedule.SchedulersPayload) == 6 + return len(cfg.Schedule.SchedulersPayload) == 4 }) re.Contains(cfg.Schedule.SchedulersPayload, "balance-leader-scheduler") re.Contains(cfg.Schedule.SchedulersPayload, "balance-region-scheduler") re.Contains(cfg.Schedule.SchedulersPayload, "balance-hot-region-scheduler") - re.Contains(cfg.Schedule.SchedulersPayload, "balance-witness-scheduler") - re.Contains(cfg.Schedule.SchedulersPayload, "transfer-witness-leader-scheduler") re.Contains(cfg.Schedule.SchedulersPayload, "evict-slow-store-scheduler") } @@ -428,7 +432,7 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { re.Equal(cfg["replication"].(map[string]interface{})["max-replicas"], float64(opts.GetReplicationConfig().MaxReplicas)) schedulers := cfg["schedule"].(map[string]interface{})["schedulers-payload"].(map[string]interface{}) - return len(schedulers) == 6 + return len(schedulers) == 4 }) // Test to change config in api server diff --git a/tests/integrations/mcs/scheduling/server_test.go b/tests/integrations/mcs/scheduling/server_test.go index 164c6ffdc7d..198fa360b9b 100644 --- a/tests/integrations/mcs/scheduling/server_test.go +++ b/tests/integrations/mcs/scheduling/server_test.go @@ -59,6 +59,8 @@ func TestServerTestSuite(t *testing.T) { func (suite *serverTestSuite) SetupSuite() { var err error re := suite.Require() + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker", `return(true)`)) + re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/scheduling/server/changeRunCollectWaitTime", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 1) @@ -78,6 +80,8 @@ func (suite *serverTestSuite) TearDownSuite() { suite.cluster.Destroy() suite.cancel() 
re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker")) + re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/changeRunCollectWaitTime")) } func (suite *serverTestSuite) TestAllocID() { @@ -137,7 +141,7 @@ func (suite *serverTestSuite) TestPrimaryChange() { testutil.Eventually(re, func() bool { watchedAddr, ok := suite.pdLeader.GetServicePrimaryAddr(suite.ctx, mcs.SchedulingServiceName) return ok && oldPrimaryAddr == watchedAddr && - len(primary.GetCluster().GetCoordinator().GetSchedulersController().GetSchedulerNames()) == 6 + len(primary.GetCluster().GetCoordinator().GetSchedulersController().GetSchedulerNames()) == 4 }) // change primary primary.Close() @@ -148,7 +152,7 @@ func (suite *serverTestSuite) TestPrimaryChange() { testutil.Eventually(re, func() bool { watchedAddr, ok := suite.pdLeader.GetServicePrimaryAddr(suite.ctx, mcs.SchedulingServiceName) return ok && newPrimaryAddr == watchedAddr && - len(primary.GetCluster().GetCoordinator().GetSchedulersController().GetSchedulerNames()) == 6 + len(primary.GetCluster().GetCoordinator().GetSchedulersController().GetSchedulerNames()) == 4 }) } @@ -203,9 +207,14 @@ func (suite *serverTestSuite) TestForwardStoreHeartbeat() { }) } -func (suite *serverTestSuite) TestDynamicSwitch() { +func (suite *serverTestSuite) TestSchedulingServiceFallback() { re := suite.Require() - // API server will execute scheduling jobs since there is no scheduler server. + leaderServer := suite.pdLeader.GetServer() + conf := leaderServer.GetMicroServiceConfig().Clone() + // Change back to the default value. + conf.EnableSchedulingFallback = true + leaderServer.SetMicroServiceConfig(*conf) + // API server will execute scheduling jobs since there is no scheduling server. 
testutil.Eventually(re, func() bool { return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -241,6 +250,50 @@ func (suite *serverTestSuite) TestDynamicSwitch() { }) } +func (suite *serverTestSuite) TestDisableSchedulingServiceFallback() { + re := suite.Require() + + // API server will execute scheduling jobs since there is no scheduling server. + testutil.Eventually(re, func() bool { + return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() + }) + leaderServer := suite.pdLeader.GetServer() + // After Disabling scheduling service fallback, the API server will stop scheduling. + conf := leaderServer.GetMicroServiceConfig().Clone() + conf.EnableSchedulingFallback = false + leaderServer.SetMicroServiceConfig(*conf) + testutil.Eventually(re, func() bool { + return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() + }) + // Enable scheduling service fallback again, the API server will restart scheduling. + conf.EnableSchedulingFallback = true + leaderServer.SetMicroServiceConfig(*conf) + testutil.Eventually(re, func() bool { + return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() + }) + + tc, err := tests.NewTestSchedulingCluster(suite.ctx, 1, suite.backendEndpoints) + re.NoError(err) + defer tc.Destroy() + tc.WaitForPrimaryServing(re) + // After scheduling server is started, API server will not execute scheduling jobs. + testutil.Eventually(re, func() bool { + return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() + }) + // Scheduling server is responsible for executing scheduling jobs. + testutil.Eventually(re, func() bool { + return tc.GetPrimaryServer().GetCluster().IsBackgroundJobsRunning() + }) + // Disable scheduling service fallback and stop scheduling server. API server won't execute scheduling jobs again. 
+ conf.EnableSchedulingFallback = false + leaderServer.SetMicroServiceConfig(*conf) + tc.GetPrimaryServer().Close() + time.Sleep(time.Second) + testutil.Eventually(re, func() bool { + return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() + }) +} + func (suite *serverTestSuite) TestSchedulerSync() { re := suite.Require() tc, err := tests.NewTestSchedulingCluster(suite.ctx, 1, suite.backendEndpoints) @@ -315,9 +368,7 @@ func (suite *serverTestSuite) TestSchedulerSync() { defaultSchedulerNames := []string{ schedulers.BalanceLeaderName, schedulers.BalanceRegionName, - schedulers.BalanceWitnessName, schedulers.HotRegionName, - schedulers.TransferWitnessLeaderName, } checkDisabled := func(name string, shouldDisabled bool) { re.NotNil(schedulersController.GetScheduler(name), name) @@ -558,3 +609,69 @@ func waitSyncFinish(re *require.Assertions, tc *tests.TestSchedulingCluster, typ return tc.GetPrimaryServer().GetCluster().GetSharedConfig().GetStoreLimitByType(2, typ) == expectedLimit }) } + +type multipleServerTestSuite struct { + suite.Suite + ctx context.Context + cancel context.CancelFunc + cluster *tests.TestCluster + pdLeader *tests.TestServer + backendEndpoints string +} + +func TestMultipleServerTestSuite(t *testing.T) { + suite.Run(t, new(multipleServerTestSuite)) +} + +func (suite *multipleServerTestSuite) SetupSuite() { + var err error + re := suite.Require() + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster, err = tests.NewTestAPICluster(suite.ctx, 2) + re.NoError(err) + + err = suite.cluster.RunInitialServers() + re.NoError(err) + + leaderName := suite.cluster.WaitLeader() + suite.pdLeader = suite.cluster.GetServer(leaderName) + suite.backendEndpoints = suite.pdLeader.GetAddr() + re.NoError(suite.pdLeader.BootstrapCluster()) +} + +func (suite *multipleServerTestSuite) TearDownSuite() { + 
re := suite.Require() + suite.cluster.Destroy() + suite.cancel() + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs")) +} + +func (suite *multipleServerTestSuite) TestReElectLeader() { + re := suite.Require() + tc, err := tests.NewTestSchedulingCluster(suite.ctx, 1, suite.backendEndpoints) + re.NoError(err) + defer tc.Destroy() + tc.WaitForPrimaryServing(re) + + rc := suite.pdLeader.GetServer().GetRaftCluster() + re.NotNil(rc) + regionLen := 100 + regions := tests.InitRegions(regionLen) + for _, region := range regions { + err = rc.HandleRegionHeartbeat(region) + re.NoError(err) + } + + originLeaderName := suite.pdLeader.GetLeader().GetName() + suite.pdLeader.ResignLeader() + newLeaderName := suite.cluster.WaitLeader() + re.NotEqual(originLeaderName, newLeaderName) + + suite.pdLeader.ResignLeader() + newLeaderName = suite.cluster.WaitLeader() + re.Equal(originLeaderName, newLeaderName) + + rc = suite.pdLeader.GetServer().GetRaftCluster() + rc.IsPrepared() +} diff --git a/tests/integrations/mcs/tso/keyspace_group_manager_test.go b/tests/integrations/mcs/tso/keyspace_group_manager_test.go index 52248086249..af33b85b1a1 100644 --- a/tests/integrations/mcs/tso/keyspace_group_manager_test.go +++ b/tests/integrations/mcs/tso/keyspace_group_manager_test.go @@ -151,6 +151,9 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspacesServedByDefaultKeysp mcs.CheckMultiKeyspacesTSO(suite.ctx, re, clients, func() { time.Sleep(3 * time.Second) }) + for _, client := range clients { + client.Close() + } } func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspacesServedByNonDefaultKeyspaceGroups() { @@ -232,6 +235,9 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspacesServedByNonDefaultKe mcs.CheckMultiKeyspacesTSO(suite.ctx, re, clients, func() { time.Sleep(3 * time.Second) }) + for _, client := range clients { + client.Close() + } } func (suite *tsoKeyspaceGroupManagerTestSuite) TestTSOKeyspaceGroupSplit() { diff --git 
a/tests/integrations/mcs/tso/server_test.go b/tests/integrations/mcs/tso/server_test.go index a6a2c42acc9..b175f63c8f4 100644 --- a/tests/integrations/mcs/tso/server_test.go +++ b/tests/integrations/mcs/tso/server_test.go @@ -186,6 +186,7 @@ func checkTSOPath(re *require.Assertions, isAPIServiceMode bool) { defer cleanup() cli := mcs.SetupClientWithAPIContext(ctx, re, pd.NewAPIContextV2(""), []string{backendEndpoints}) + defer cli.Close() physical, logical, err := cli.GetTS(ctx) re.NoError(err) ts := tsoutil.ComposeTS(physical, logical) diff --git a/tests/integrations/realcluster/Makefile b/tests/integrations/realcluster/Makefile index e03007a4c31..4817b94b5da 100644 --- a/tests/integrations/realcluster/Makefile +++ b/tests/integrations/realcluster/Makefile @@ -48,7 +48,7 @@ kill_cluster: echo $$pid; \ kill $$pid; \ echo "waiting for cluster to exit..."; \ - sleep 10; \ + sleep 30; \ fi test: diff --git a/tests/integrations/realcluster/util.go b/tests/integrations/realcluster/util.go index 412df51894b..f6c8295b6ef 100644 --- a/tests/integrations/realcluster/util.go +++ b/tests/integrations/realcluster/util.go @@ -23,7 +23,7 @@ import ( const physicalShiftBits = 18 var ( - pdAddrs = []string{"127.0.0.1:2379"} + pdAddrs = []string{"http://127.0.0.1:2379"} pdHTTPCli = http.NewClient("pd-real-cluster-test", pdAddrs) ) diff --git a/tests/integrations/tso/client_test.go b/tests/integrations/tso/client_test.go index c8e8f5b2f52..c5cc6ec5d6d 100644 --- a/tests/integrations/tso/client_test.go +++ b/tests/integrations/tso/client_test.go @@ -183,6 +183,9 @@ func (suite *tsoClientTestSuite) TearDownSuite() { suite.tsoCluster.Destroy() } suite.cluster.Destroy() + for _, client := range suite.clients { + client.Close() + } } func (suite *tsoClientTestSuite) TestGetTS() { @@ -252,6 +255,7 @@ func (suite *tsoClientTestSuite) TestDiscoverTSOServiceWithLegacyPath() { defer cancel() client := mcs.SetupClientWithKeyspaceID( ctx, re, keyspaceID, strings.Split(suite.backendEndpoints, 
",")) + defer client.Close() var lastTS uint64 for j := 0; j < tsoRequestRound; j++ { physical, logical, err := client.GetTS(ctx) @@ -491,6 +495,7 @@ func TestUpgradingAPIandTSOClusters(t *testing.T) { pdClient, err := pd.NewClientWithContext(context.Background(), []string{backendEndpoints}, pd.SecurityOption{}, pd.WithMaxErrorRetry(1)) re.NoError(err) + defer pdClient.Close() // Create a TSO cluster which has 2 servers tsoCluster, err := tests.NewTestTSOCluster(ctx, 2, backendEndpoints) diff --git a/tests/server/api/operator_test.go b/tests/server/api/operator_test.go index ab453e746a9..cd3f2ac34dc 100644 --- a/tests/server/api/operator_test.go +++ b/tests/server/api/operator_test.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "net/http" + "sort" "strconv" "strings" "testing" @@ -465,7 +466,7 @@ func (suite *operatorTestSuite) checkTransferRegionWithPlacementRule(cluster *te err := tu.CheckPostJSON(testDialClient, url, reqData, tu.StatusOK(re)) re.NoError(err) if sche := cluster.GetSchedulingPrimaryServer(); sche != nil { - // wait for the scheduler server to update the config + // wait for the scheduling server to update the config tu.Eventually(re, func() bool { return sche.GetCluster().GetCheckerConfig().IsPlacementRulesEnabled() == testCase.placementRuleEnable }) @@ -509,6 +510,108 @@ func (suite *operatorTestSuite) checkTransferRegionWithPlacementRule(cluster *te } } +func (suite *operatorTestSuite) TestGetOperatorsAsObject() { + // use a new environment to avoid being affected by other tests + env := tests.NewSchedulingTestEnvironment(suite.T(), + func(conf *config.Config, serverName string) { + conf.Replication.MaxReplicas = 1 + }) + env.RunTestInTwoModes(suite.checkGetOperatorsAsObject) + env.Cleanup() +} + +func (suite *operatorTestSuite) checkGetOperatorsAsObject(cluster *tests.TestCluster) { + re := suite.Require() + suite.pauseRuleChecker(re, cluster) + stores := []*metapb.Store{ + { + Id: 1, + State: metapb.StoreState_Up, + NodeState: 
metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + }, + { + Id: 2, + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + }, + { + Id: 3, + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + }, + } + + for _, store := range stores { + tests.MustPutStore(re, cluster, store) + } + + urlPrefix := fmt.Sprintf("%s/pd/api/v1", cluster.GetLeaderServer().GetAddr()) + objURL := fmt.Sprintf("%s/operators?object=1", urlPrefix) + resp := make([]operator.OpObject, 0) + + // No operator. + err := tu.ReadGetJSON(re, testDialClient, objURL, &resp) + re.NoError(err) + re.Empty(resp) + + // Merge operator. + r1 := core.NewTestRegionInfo(10, 1, []byte(""), []byte("b"), core.SetWrittenBytes(1000), core.SetReadBytes(1000), core.SetRegionConfVer(1), core.SetRegionVersion(1)) + tests.MustPutRegionInfo(re, cluster, r1) + r2 := core.NewTestRegionInfo(20, 1, []byte("b"), []byte("c"), core.SetWrittenBytes(2000), core.SetReadBytes(0), core.SetRegionConfVer(2), core.SetRegionVersion(3)) + tests.MustPutRegionInfo(re, cluster, r2) + r3 := core.NewTestRegionInfo(30, 1, []byte("c"), []byte("d"), core.SetWrittenBytes(500), core.SetReadBytes(800), core.SetRegionConfVer(3), core.SetRegionVersion(2)) + tests.MustPutRegionInfo(re, cluster, r3) + + err = tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/operators", urlPrefix), []byte(`{"name":"merge-region", "source_region_id": 10, "target_region_id": 20}`), tu.StatusOK(re)) + re.NoError(err) + err = tu.ReadGetJSON(re, testDialClient, objURL, &resp) + re.NoError(err) + re.Len(resp, 2) + less := func(i, j int) bool { + return resp[i].RegionID < resp[j].RegionID + } + sort.Slice(resp, less) + re.Equal(uint64(10), resp[0].RegionID) + re.Equal("admin-merge-region", resp[0].Desc) + re.Equal("merge: region 10 to 20", resp[0].Brief) + re.Equal("10m0s", resp[0].Timeout) + re.Equal(&metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + 
}, resp[0].RegionEpoch) + re.Equal(operator.OpAdmin|operator.OpMerge, resp[0].Kind) + re.Truef(resp[0].Status == operator.CREATED || resp[0].Status == operator.STARTED, "unexpected status %s", resp[0].Status) + re.Equal(uint64(20), resp[1].RegionID) + re.Equal("admin-merge-region", resp[1].Desc) + + // Add peer operator. + peer1 := &metapb.Peer{Id: 100, StoreId: 1} + peer2 := &metapb.Peer{Id: 200, StoreId: 2} + region := &metapb.Region{ + Id: 40, + Peers: []*metapb.Peer{peer1, peer2}, + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + }, + StartKey: []byte("d"), + EndKey: []byte(""), + } + regionInfo := core.NewRegionInfo(region, peer1) + tests.MustPutRegionInfo(re, cluster, regionInfo) + err = tu.CheckPostJSON(testDialClient, fmt.Sprintf("%s/operators", urlPrefix), []byte(`{"name":"add-peer", "region_id": 40, "store_id": 3}`), tu.StatusOK(re)) + re.NoError(err) + err = tu.ReadGetJSON(re, testDialClient, objURL, &resp) + re.NoError(err) + re.Len(resp, 3) + sort.Slice(resp, less) + re.Equal(uint64(40), resp[2].RegionID) + re.Equal("admin-add-peer", resp[2].Desc) +} + // pauseRuleChecker will pause rule checker to avoid unexpected operator. 
func (suite *operatorTestSuite) pauseRuleChecker(re *require.Assertions, cluster *tests.TestCluster) { checkerName := "rule" diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go index 9819e821d29..a845d2f3e05 100644 --- a/tests/server/api/rule_test.go +++ b/tests/server/api/rule_test.go @@ -1437,7 +1437,7 @@ func (suite *regionRuleTestSuite) checkRegionPlacementRule(cluster *tests.TestCl err = tu.CheckPostJSON(testDialClient, u, reqData, tu.StatusOK(re)) re.NoError(err) if sche := cluster.GetSchedulingPrimaryServer(); sche != nil { - // wait for the scheduler server to update the config + // wait for the scheduling server to update the config tu.Eventually(re, func() bool { return !sche.GetCluster().GetCheckerConfig().IsPlacementRulesEnabled() }) diff --git a/tests/server/join/join_test.go b/tests/server/join/join_test.go index 45eb1f16282..5cdcbc090b8 100644 --- a/tests/server/join/join_test.go +++ b/tests/server/join/join_test.go @@ -47,7 +47,7 @@ func TestSimpleJoin(t *testing.T) { pd1 := cluster.GetServer("pd1") client := pd1.GetEtcdClient() - members, err := etcdutil.ListEtcdMembers(client) + members, err := etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) re.Len(members.Members, 1) @@ -58,7 +58,7 @@ func TestSimpleJoin(t *testing.T) { re.NoError(err) _, err = os.Stat(path.Join(pd2.GetConfig().DataDir, "join")) re.False(os.IsNotExist(err)) - members, err = etcdutil.ListEtcdMembers(client) + members, err = etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) re.Len(members.Members, 2) re.Equal(pd1.GetClusterID(), pd2.GetClusterID()) @@ -73,7 +73,7 @@ func TestSimpleJoin(t *testing.T) { re.NoError(err) _, err = os.Stat(path.Join(pd3.GetConfig().DataDir, "join")) re.False(os.IsNotExist(err)) - members, err = etcdutil.ListEtcdMembers(client) + members, err = etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) re.Len(members.Members, 3) re.Equal(pd1.GetClusterID(), pd3.GetClusterID()) @@ -108,7 +108,7 @@ func 
TestFailedAndDeletedPDJoinsPreviousCluster(t *testing.T) { res := cluster.RunServer(pd3) re.Error(<-res) - members, err := etcdutil.ListEtcdMembers(client) + members, err := etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) re.Len(members.Members, 2) } @@ -141,7 +141,7 @@ func TestDeletedPDJoinsPreviousCluster(t *testing.T) { res := cluster.RunServer(pd3) re.Error(<-res) - members, err := etcdutil.ListEtcdMembers(client) + members, err := etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) re.Len(members.Members, 2) } diff --git a/tests/server/region_syncer/region_syncer_test.go b/tests/server/region_syncer/region_syncer_test.go index 87b5c0683c7..a91bbaf6b40 100644 --- a/tests/server/region_syncer/region_syncer_test.go +++ b/tests/server/region_syncer/region_syncer_test.go @@ -20,11 +20,9 @@ import ( "time" "github.com/pingcap/failpoint" - "github.com/pingcap/kvproto/pkg/metapb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" @@ -35,15 +33,6 @@ func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) 
} -type idAllocator struct { - allocator *mockid.IDAllocator -} - -func (i *idAllocator) alloc() uint64 { - v, _ := i.allocator.Alloc() - return v -} - func TestRegionSyncer(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -77,7 +66,7 @@ func TestRegionSyncer(t *testing.T) { }) regionLen := 110 - regions := initRegions(regionLen) + regions := tests.InitRegions(regionLen) for _, region := range regions { err = rc.HandleRegionHeartbeat(region) re.NoError(err) @@ -186,7 +175,7 @@ func TestFullSyncWithAddMember(t *testing.T) { rc := leaderServer.GetServer().GetRaftCluster() re.NotNil(rc) regionLen := 110 - regions := initRegions(regionLen) + regions := tests.InitRegions(regionLen) for _, region := range regions { err = rc.HandleRegionHeartbeat(region) re.NoError(err) @@ -230,7 +219,7 @@ func TestPrepareChecker(t *testing.T) { rc := leaderServer.GetServer().GetRaftCluster() re.NotNil(rc) regionLen := 110 - regions := initRegions(regionLen) + regions := tests.InitRegions(regionLen) for _, region := range regions { err = rc.HandleRegionHeartbeat(region) re.NoError(err) @@ -279,7 +268,7 @@ func TestPrepareCheckerWithTransferLeader(t *testing.T) { rc := leaderServer.GetServer().GetRaftCluster() re.NotNil(rc) regionLen := 100 - regions := initRegions(regionLen) + regions := tests.InitRegions(regionLen) for _, region := range regions { err = rc.HandleRegionHeartbeat(region) re.NoError(err) @@ -306,36 +295,3 @@ func TestPrepareCheckerWithTransferLeader(t *testing.T) { re.True(rc.IsPrepared()) re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker")) } - -func initRegions(regionLen int) []*core.RegionInfo { - allocator := &idAllocator{allocator: mockid.NewIDAllocator()} - regions := make([]*core.RegionInfo, 0, regionLen) - for i := 0; i < regionLen; i++ { - r := &metapb.Region{ - Id: allocator.alloc(), - RegionEpoch: &metapb.RegionEpoch{ - ConfVer: 1, - Version: 1, - }, - StartKey: []byte{byte(i)}, - 
EndKey: []byte{byte(i + 1)}, - Peers: []*metapb.Peer{ - {Id: allocator.alloc(), StoreId: uint64(1)}, - {Id: allocator.alloc(), StoreId: uint64(2)}, - {Id: allocator.alloc(), StoreId: uint64(3)}, - }, - } - region := core.NewRegionInfo(r, r.Peers[0], core.SetSource(core.Heartbeat)) - // Here is used to simulate the upgrade process. - if i < regionLen/2 { - buckets := &metapb.Buckets{ - RegionId: r.Id, - Keys: [][]byte{r.StartKey, r.EndKey}, - Version: 1, - } - region.UpdateBuckets(buckets, region.GetBuckets()) - } - regions = append(regions, region) - } - return regions -} diff --git a/tests/testutil.go b/tests/testutil.go index f85595492bf..106cddc9dfb 100644 --- a/tests/testutil.go +++ b/tests/testutil.go @@ -37,6 +37,7 @@ import ( sc "github.com/tikv/pd/pkg/mcs/scheduling/server/config" tso "github.com/tikv/pd/pkg/mcs/tso/server" "github.com/tikv/pd/pkg/mcs/utils" + "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/versioninfo" @@ -365,3 +366,46 @@ func (s *SchedulingTestEnvironment) startCluster(m mode) { s.clusters[apiMode] = cluster } } + +type idAllocator struct { + allocator *mockid.IDAllocator +} + +func (i *idAllocator) alloc() uint64 { + v, _ := i.allocator.Alloc() + return v +} + +// InitRegions is used for test purpose. 
+func InitRegions(regionLen int) []*core.RegionInfo { + allocator := &idAllocator{allocator: mockid.NewIDAllocator()} + regions := make([]*core.RegionInfo, 0, regionLen) + for i := 0; i < regionLen; i++ { + r := &metapb.Region{ + Id: allocator.alloc(), + RegionEpoch: &metapb.RegionEpoch{ + ConfVer: 1, + Version: 1, + }, + StartKey: []byte{byte(i)}, + EndKey: []byte{byte(i + 1)}, + Peers: []*metapb.Peer{ + {Id: allocator.alloc(), StoreId: uint64(1)}, + {Id: allocator.alloc(), StoreId: uint64(2)}, + {Id: allocator.alloc(), StoreId: uint64(3)}, + }, + } + region := core.NewRegionInfo(r, r.Peers[0], core.SetSource(core.Heartbeat)) + // Here is used to simulate the upgrade process. + if i < regionLen/2 { + buckets := &metapb.Buckets{ + RegionId: r.Id, + Keys: [][]byte{r.StartKey, r.EndKey}, + Version: 1, + } + region.UpdateBuckets(buckets, region.GetBuckets()) + } + regions = append(regions, region) + } + return regions +} diff --git a/tools/go.mod b/tools/go.mod index 9a75b292f99..767ada3c8cc 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -27,7 +27,7 @@ require ( github.com/pingcap/kvproto v0.0.0-20231222062942-c0c73f41d0b2 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.11.1 + github.com/prometheus/client_golang v1.18.0 github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 @@ -119,7 +119,6 @@ require ( github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-sqlite3 v1.14.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/minio/sio v0.3.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -134,9 +133,9 @@ require ( github.com/pingcap/tipb v0.0.0-20220718022156-3e2483c20a9e // indirect github.com/pmezard/go-difflib v1.0.0 // indirect 
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.26.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.46.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/samber/lo v1.37.0 // indirect github.com/sasha-s/go-deadlock v0.2.0 // indirect @@ -168,20 +167,20 @@ require ( go.uber.org/fx v1.12.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/crypto v0.17.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 // indirect golang.org/x/image v0.10.0 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/oauth2 v0.4.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.4.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.14.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/tools/go.sum b/tools/go.sum index 5f4d176469d..54acc216ec0 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -19,10 +19,7 @@ github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502 h1:L github.com/Xeoncross/go-aesctr-with-hmac 
v0.0.0-20200623134604-12b17a7ff502/go.mod h1:pmnBM9bxWSiHvC/gSWunUIyDvGn33EkP2CUjxFKtTTM= github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= @@ -77,7 +74,6 @@ github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4ea github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -153,11 +149,8 @@ github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SU github.com/go-echarts/go-echarts v1.0.0 
h1:n181E4iXwj4zrU9VYmdM2m8dyhERt2w9k9YhHqdp6A8= github.com/go-echarts/go-echarts v1.0.0/go.mod h1:qbmyAb/Rl1f2w7wKba1D4LoNq4U164yO4/wedFbcWyo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -222,13 +215,6 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -239,10 +225,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -324,13 +307,10 @@ github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -343,8 +323,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -372,7 +353,6 @@ github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebG github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.1 
h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microsoft/go-mssqldb v0.17.0 h1:Fto83dMZPnYv1Zwx5vHHxpNraeEaUlQ/hhHLgZiaenE= github.com/microsoft/go-mssqldb v0.17.0/go.mod h1:OkoNGhGEs8EZqchVTtochlXruEhEOaO4S0d2sB5aeGQ= @@ -455,34 +435,31 @@ github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:Om github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= 
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -647,8 +624,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 h1:QLureRX3moex6NVu/Lr4MGakp9FdA7sBHGBmvRW7NaM= @@ -683,10 +660,8 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -697,20 +672,18 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -726,20 +699,15 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -754,8 +722,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -829,17 +797,11 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 
h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -864,9 +826,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tools/pd-api-bench/cases/cases.go b/tools/pd-api-bench/cases/cases.go index 79d2ce873c5..d289ecef06b 100644 --- a/tools/pd-api-bench/cases/cases.go +++ b/tools/pd-api-bench/cases/cases.go @@ -17,11 +17,13 @@ package cases import ( "context" "fmt" - "log" "math/rand" + "github.com/pingcap/log" pd "github.com/tikv/pd/client" pdHttp "github.com/tikv/pd/client/http" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" ) var ( @@ -50,10 +52,28 @@ func InitCluster(ctx context.Context, cli pd.Client, httpCli pdHttp.Client) erro for _, 
store := range stores { storesID = append(storesID, store.GetId()) } - log.Printf("This cluster has region %d, and store %d[%v]", totalRegion, totalStore, storesID) + log.Info("init cluster info", zap.Int("total-region", totalRegion), zap.Int("total-store", totalStore), zap.Any("store-ids", storesID)) return nil } +// Config is the configuration for the case. +type Config struct { + QPS int64 `toml:"qps" json:"qps"` + Burst int64 `toml:"burst" json:"burst"` +} + +func newConfig() *Config { + return &Config{ + Burst: 1, + } +} + +// Clone returns a cloned configuration. +func (c *Config) Clone() *Config { + cfg := *c + return &cfg +} + // Case is the interface for all cases. type Case interface { Name() string @@ -61,12 +81,12 @@ type Case interface { GetQPS() int64 SetBurst(int64) GetBurst() int64 + GetConfig() *Config } type baseCase struct { - name string - qps int64 - burst int64 + name string + cfg *Config } func (c *baseCase) Name() string { @@ -74,19 +94,41 @@ func (c *baseCase) Name() string { } func (c *baseCase) SetQPS(qps int64) { - c.qps = qps + c.cfg.QPS = qps } func (c *baseCase) GetQPS() int64 { - return c.qps + return c.cfg.QPS } func (c *baseCase) SetBurst(burst int64) { - c.burst = burst + c.cfg.Burst = burst } func (c *baseCase) GetBurst() int64 { - return c.burst + return c.cfg.Burst +} + +func (c *baseCase) GetConfig() *Config { + return c.cfg.Clone() +} + +// ETCDCase is the interface for all etcd api cases. +type ETCDCase interface { + Case + Init(context.Context, *clientv3.Client) error + Unary(context.Context, *clientv3.Client) error +} + +// ETCDCraeteFn is function type to create ETCDCase. +type ETCDCraeteFn func() ETCDCase + +// ETCDCaseFnMap is the map for all ETCD case creation function. +var ETCDCaseFnMap = map[string]ETCDCraeteFn{ + "Get": newGetKV(), + "Put": newPutKV(), + "Delete": newDeleteKV(), + "Txn": newTxnKV(), } // GRPCCase is the interface for all gRPC cases. 
@@ -100,16 +142,14 @@ type GRPCCraeteFn func() GRPCCase // GRPCCaseFnMap is the map for all gRPC case creation function. var GRPCCaseFnMap = map[string]GRPCCraeteFn{ - "GetRegion": newGetRegion(), - "GetStore": newGetStore(), - "GetStores": newGetStores(), - "ScanRegions": newScanRegions(), - "Tso": newTso(), + "GetRegion": newGetRegion(), + "GetRegionEnableFollower": newGetRegionEnableFollower(), + "GetStore": newGetStore(), + "GetStores": newGetStores(), + "ScanRegions": newScanRegions(), + "Tso": newTso(), } -// GRPCCaseMap is the map for all gRPC case creation function. -var GRPCCaseMap = map[string]GRPCCase{} - // HTTPCase is the interface for all HTTP cases. type HTTPCase interface { Case @@ -125,9 +165,6 @@ var HTTPCaseFnMap = map[string]HTTPCraeteFn{ "GetMinResolvedTS": newMinResolvedTS(), } -// HTTPCaseMap is the map for all HTTP cases. -var HTTPCaseMap = map[string]HTTPCase{} - type minResolvedTS struct { *baseCase } @@ -136,9 +173,8 @@ func newMinResolvedTS() func() HTTPCase { return func() HTTPCase { return &minResolvedTS{ baseCase: &baseCase{ - name: "GetMinResolvedTS", - qps: 1000, - burst: 1, + name: "GetMinResolvedTS", + cfg: newConfig(), }, } } @@ -147,7 +183,7 @@ func newMinResolvedTS() func() HTTPCase { func (c *minResolvedTS) Do(ctx context.Context, cli pdHttp.Client) error { minResolvedTS, storesMinResolvedTS, err := cli.GetMinResolvedTSByStoresIDs(ctx, storesID) if Debug { - log.Printf("Do %s: minResolvedTS: %d storesMinResolvedTS: %v err: %v", c.name, minResolvedTS, storesMinResolvedTS, err) + log.Info("do HTTP case", zap.String("case", c.name), zap.Uint64("min-resolved-ts", minResolvedTS), zap.Any("store-min-resolved-ts", storesMinResolvedTS), zap.Error(err)) } if err != nil { return err @@ -164,9 +200,8 @@ func newRegionStats() func() HTTPCase { return func() HTTPCase { return ®ionsStats{ baseCase: &baseCase{ - name: "GetRegionStatus", - qps: 100, - burst: 1, + name: "GetRegionStatus", + cfg: newConfig(), }, regionSample: 1000, } @@ -184,7 
+219,7 @@ func (c *regionsStats) Do(ctx context.Context, cli pdHttp.Client) error { regionStats, err := cli.GetRegionStatusByKeyRange(ctx, pdHttp.NewKeyRange(generateKeyForSimulator(startID, 56), generateKeyForSimulator(endID, 56)), false) if Debug { - log.Printf("Do %s: regionStats: %v err: %v", c.name, regionStats, err) + log.Info("do HTTP case", zap.String("case", c.name), zap.Any("region-stats", regionStats), zap.Error(err)) } if err != nil { return err @@ -200,9 +235,8 @@ func newGetRegion() func() GRPCCase { return func() GRPCCase { return &getRegion{ baseCase: &baseCase{ - name: "GetRegion", - qps: 10000, - burst: 1, + name: "GetRegion", + cfg: newConfig(), }, } } @@ -217,6 +251,30 @@ func (c *getRegion) Unary(ctx context.Context, cli pd.Client) error { return nil } +type getRegionEnableFollower struct { + *baseCase +} + +func newGetRegionEnableFollower() func() GRPCCase { + return func() GRPCCase { + return &getRegionEnableFollower{ + baseCase: &baseCase{ + name: "GetRegionEnableFollower", + cfg: newConfig(), + }, + } + } +} + +func (c *getRegionEnableFollower) Unary(ctx context.Context, cli pd.Client) error { + id := rand.Intn(totalRegion)*4 + 1 + _, err := cli.GetRegion(ctx, generateKeyForSimulator(id, 56), pd.WithAllowFollowerHandle()) + if err != nil { + return err + } + return nil +} + type scanRegions struct { *baseCase regionSample int @@ -226,9 +284,8 @@ func newScanRegions() func() GRPCCase { return func() GRPCCase { return &scanRegions{ baseCase: &baseCase{ - name: "ScanRegions", - qps: 10000, - burst: 1, + name: "ScanRegions", + cfg: newConfig(), }, regionSample: 10000, } @@ -255,9 +312,8 @@ func newTso() func() GRPCCase { return func() GRPCCase { return &tso{ baseCase: &baseCase{ - name: "Tso", - qps: 10000, - burst: 1, + name: "Tso", + cfg: newConfig(), }, } } @@ -279,9 +335,8 @@ func newGetStore() func() GRPCCase { return func() GRPCCase { return &getStore{ baseCase: &baseCase{ - name: "GetStore", - qps: 10000, - burst: 1, + name: "GetStore", 
+ cfg: newConfig(), }, } } @@ -304,9 +359,8 @@ func newGetStores() func() GRPCCase { return func() GRPCCase { return &getStores{ baseCase: &baseCase{ - name: "GetStores", - qps: 10000, - burst: 1, + name: "GetStores", + cfg: newConfig(), }, } } @@ -326,3 +380,102 @@ func generateKeyForSimulator(id int, keyLen int) []byte { copy(k, fmt.Sprintf("%010d", id)) return k } + +type getKV struct { + *baseCase +} + +func newGetKV() func() ETCDCase { + return func() ETCDCase { + return &getKV{ + baseCase: &baseCase{ + name: "Get", + cfg: newConfig(), + }, + } + } +} + +func (c *getKV) Init(ctx context.Context, cli *clientv3.Client) error { + for i := 0; i < 100; i++ { + _, err := cli.Put(ctx, fmt.Sprintf("/test/0001/%4d", i), fmt.Sprintf("%4d", i)) + if err != nil { + return err + } + } + return nil +} + +func (c *getKV) Unary(ctx context.Context, cli *clientv3.Client) error { + _, err := cli.Get(ctx, "/test/0001", clientv3.WithPrefix()) + return err +} + +type putKV struct { + *baseCase +} + +func newPutKV() func() ETCDCase { + return func() ETCDCase { + return &putKV{ + baseCase: &baseCase{ + name: "Put", + cfg: newConfig(), + }, + } + } +} + +func (c *putKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } + +func (c *putKV) Unary(ctx context.Context, cli *clientv3.Client) error { + _, err := cli.Put(ctx, "/test/0001/0000", "test") + return err +} + +type deleteKV struct { + *baseCase +} + +func newDeleteKV() func() ETCDCase { + return func() ETCDCase { + return &deleteKV{ + baseCase: &baseCase{ + name: "Put", + cfg: newConfig(), + }, + } + } +} + +func (c *deleteKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } + +func (c *deleteKV) Unary(ctx context.Context, cli *clientv3.Client) error { + _, err := cli.Delete(ctx, "/test/0001/0000") + return err +} + +type txnKV struct { + *baseCase +} + +func newTxnKV() func() ETCDCase { + return func() ETCDCase { + return &txnKV{ + baseCase: &baseCase{ + name: "Put", + cfg: newConfig(), + }, 
+ } + } +} + +func (c *txnKV) Init(ctx context.Context, cli *clientv3.Client) error { return nil } + +func (c *txnKV) Unary(ctx context.Context, cli *clientv3.Client) error { + txn := cli.Txn(ctx) + txn = txn.If(clientv3.Compare(clientv3.Value("/test/0001/0000"), "=", "test")) + txn = txn.Then(clientv3.OpPut("/test/0001/0000", "test2")) + _, err := txn.Commit() + return err +} diff --git a/tools/pd-api-bench/cases/controller.go b/tools/pd-api-bench/cases/controller.go new file mode 100644 index 00000000000..db42c469843 --- /dev/null +++ b/tools/pd-api-bench/cases/controller.go @@ -0,0 +1,386 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cases + +import ( + "context" + "sync" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + pd "github.com/tikv/pd/client" + pdHttp "github.com/tikv/pd/client/http" + "go.etcd.io/etcd/clientv3" + "go.uber.org/zap" +) + +var base = int64(time.Second) / int64(time.Microsecond) + +// Coordinator managers the operation of the gRPC and HTTP case. +type Coordinator struct { + ctx context.Context + + httpClients []pdHttp.Client + gRPCClients []pd.Client + etcdClients []*clientv3.Client + + http map[string]*httpController + grpc map[string]*gRPCController + etcd map[string]*etcdController + + mu sync.RWMutex +} + +// NewCoordinator returns a new coordinator. 
+func NewCoordinator(ctx context.Context, httpClients []pdHttp.Client, gRPCClients []pd.Client, etcdClients []*clientv3.Client) *Coordinator { + return &Coordinator{ + ctx: ctx, + httpClients: httpClients, + gRPCClients: gRPCClients, + etcdClients: etcdClients, + http: make(map[string]*httpController), + grpc: make(map[string]*gRPCController), + etcd: make(map[string]*etcdController), + } +} + +// GetHTTPCase returns the HTTP case config. +func (c *Coordinator) GetHTTPCase(name string) (*Config, error) { + c.mu.RLock() + defer c.mu.RUnlock() + if controller, ok := c.http[name]; ok { + return controller.GetConfig(), nil + } + return nil, errors.Errorf("case %v does not exist.", name) +} + +// GetGRPCCase returns the gRPC case config. +func (c *Coordinator) GetGRPCCase(name string) (*Config, error) { + c.mu.RLock() + defer c.mu.RUnlock() + if controller, ok := c.grpc[name]; ok { + return controller.GetConfig(), nil + } + return nil, errors.Errorf("case %v does not exist.", name) +} + +// GetETCDCase returns the etcd case config. +func (c *Coordinator) GetETCDCase(name string) (*Config, error) { + c.mu.RLock() + defer c.mu.RUnlock() + if controller, ok := c.etcd[name]; ok { + return controller.GetConfig(), nil + } + return nil, errors.Errorf("case %v does not exist.", name) +} + +// GetAllHTTPCases returns the all HTTP case configs. +func (c *Coordinator) GetAllHTTPCases() map[string]*Config { + c.mu.RLock() + defer c.mu.RUnlock() + ret := make(map[string]*Config) + for name, c := range c.http { + ret[name] = c.GetConfig() + } + return ret +} + +// GetAllGRPCCases returns the all gRPC case configs. +func (c *Coordinator) GetAllGRPCCases() map[string]*Config { + c.mu.RLock() + defer c.mu.RUnlock() + ret := make(map[string]*Config) + for name, c := range c.grpc { + ret[name] = c.GetConfig() + } + return ret +} + +// GetAllETCDCases returns the all etcd case configs. 
+func (c *Coordinator) GetAllETCDCases() map[string]*Config { + c.mu.RLock() + defer c.mu.RUnlock() + ret := make(map[string]*Config) + for name, c := range c.etcd { + ret[name] = c.GetConfig() + } + return ret +} + +// SetHTTPCase sets the config for the specific case. +func (c *Coordinator) SetHTTPCase(name string, cfg *Config) error { + c.mu.Lock() + defer c.mu.Unlock() + if fn, ok := HTTPCaseFnMap[name]; ok { + var controller *httpController + if controller, ok = c.http[name]; !ok { + controller = newHTTPController(c.ctx, c.httpClients, fn) + c.http[name] = controller + } + controller.stop() + controller.SetQPS(cfg.QPS) + if cfg.Burst > 0 { + controller.SetBurst(cfg.Burst) + } + controller.run() + } else { + return errors.Errorf("HTTP case %s not implemented", name) + } + return nil +} + +// SetGRPCCase sets the config for the specific case. +func (c *Coordinator) SetGRPCCase(name string, cfg *Config) error { + c.mu.Lock() + defer c.mu.Unlock() + if fn, ok := GRPCCaseFnMap[name]; ok { + var controller *gRPCController + if controller, ok = c.grpc[name]; !ok { + controller = newGRPCController(c.ctx, c.gRPCClients, fn) + c.grpc[name] = controller + } + controller.stop() + controller.SetQPS(cfg.QPS) + if cfg.Burst > 0 { + controller.SetBurst(cfg.Burst) + } + controller.run() + } else { + return errors.Errorf("gRPC case %s not implemented", name) + } + return nil +} + +// SetETCDCase sets the config for the specific case. 
+func (c *Coordinator) SetETCDCase(name string, cfg *Config) error { + c.mu.Lock() + defer c.mu.Unlock() + if fn, ok := ETCDCaseFnMap[name]; ok { + var controller *etcdController + if controller, ok = c.etcd[name]; !ok { + controller = newEtcdController(c.ctx, c.etcdClients, fn) + c.etcd[name] = controller + } + controller.stop() + controller.SetQPS(cfg.QPS) + if cfg.Burst > 0 { + controller.SetBurst(cfg.Burst) + } + controller.run() + } else { + return errors.Errorf("etcd case %s not implemented", name) + } + return nil +} + +type httpController struct { + HTTPCase + clients []pdHttp.Client + pctx context.Context + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +func newHTTPController(ctx context.Context, clis []pdHttp.Client, fn HTTPCraeteFn) *httpController { + c := &httpController{ + pctx: ctx, + clients: clis, + HTTPCase: fn(), + } + return c +} + +// run tries to run the HTTP api bench. +func (c *httpController) run() { + if c.GetQPS() <= 0 || c.cancel != nil { + return + } + c.ctx, c.cancel = context.WithCancel(c.pctx) + qps := c.GetQPS() + burst := c.GetBurst() + cliNum := int64(len(c.clients)) + tt := time.Duration(base/qps*burst*cliNum) * time.Microsecond + log.Info("begin to run http case", zap.String("case", c.Name()), zap.Int64("qps", qps), zap.Int64("burst", burst), zap.Duration("interval", tt)) + for _, hCli := range c.clients { + c.wg.Add(1) + go func(hCli pdHttp.Client) { + defer c.wg.Done() + var ticker = time.NewTicker(tt) + defer ticker.Stop() + for { + select { + case <-ticker.C: + for i := int64(0); i < burst; i++ { + err := c.Do(c.ctx, hCli) + if err != nil { + log.Error("meet erorr when doing HTTP request", zap.String("case", c.Name()), zap.Error(err)) + } + } + case <-c.ctx.Done(): + log.Info("Got signal to exit running HTTP case") + return + } + } + }(hCli) + } +} + +// stop stops the HTTP api bench. 
+func (c *httpController) stop() { + if c.cancel == nil { + return + } + c.cancel() + c.cancel = nil + c.wg.Wait() +} + +type gRPCController struct { + GRPCCase + clients []pd.Client + pctx context.Context + + ctx context.Context + cancel context.CancelFunc + + wg sync.WaitGroup +} + +func newGRPCController(ctx context.Context, clis []pd.Client, fn GRPCCraeteFn) *gRPCController { + c := &gRPCController{ + pctx: ctx, + clients: clis, + GRPCCase: fn(), + } + return c +} + +// run tries to run the gRPC api bench. +func (c *gRPCController) run() { + if c.GetQPS() <= 0 || c.cancel != nil { + return + } + c.ctx, c.cancel = context.WithCancel(c.pctx) + qps := c.GetQPS() + burst := c.GetBurst() + cliNum := int64(len(c.clients)) + tt := time.Duration(base/qps*burst*cliNum) * time.Microsecond + log.Info("begin to run gRPC case", zap.String("case", c.Name()), zap.Int64("qps", qps), zap.Int64("burst", burst), zap.Duration("interval", tt)) + for _, cli := range c.clients { + c.wg.Add(1) + go func(cli pd.Client) { + defer c.wg.Done() + var ticker = time.NewTicker(tt) + defer ticker.Stop() + for { + select { + case <-ticker.C: + for i := int64(0); i < burst; i++ { + err := c.Unary(c.ctx, cli) + if err != nil { + log.Error("meet erorr when doing gRPC request", zap.String("case", c.Name()), zap.Error(err)) + } + } + case <-c.ctx.Done(): + log.Info("Got signal to exit running gRPC case") + return + } + } + }(cli) + } +} + +// stop stops the gRPC api bench. +func (c *gRPCController) stop() { + if c.cancel == nil { + return + } + c.cancel() + c.cancel = nil + c.wg.Wait() +} + +type etcdController struct { + ETCDCase + clients []*clientv3.Client + pctx context.Context + + ctx context.Context + cancel context.CancelFunc + + wg sync.WaitGroup +} + +func newEtcdController(ctx context.Context, clis []*clientv3.Client, fn ETCDCraeteFn) *etcdController { + c := &etcdController{ + pctx: ctx, + clients: clis, + ETCDCase: fn(), + } + return c +} + +// run tries to run the gRPC api bench. 
+func (c *etcdController) run() { + if c.GetQPS() <= 0 || c.cancel != nil { + return + } + c.ctx, c.cancel = context.WithCancel(c.pctx) + qps := c.GetQPS() + burst := c.GetBurst() + cliNum := int64(len(c.clients)) + tt := time.Duration(base/qps*burst*cliNum) * time.Microsecond + log.Info("begin to run etcd case", zap.String("case", c.Name()), zap.Int64("qps", qps), zap.Int64("burst", burst), zap.Duration("interval", tt)) + err := c.Init(c.ctx, c.clients[0]) + if err != nil { + log.Error("init error", zap.String("case", c.Name()), zap.Error(err)) + return + } + for _, cli := range c.clients { + c.wg.Add(1) + go func(cli *clientv3.Client) { + defer c.wg.Done() + var ticker = time.NewTicker(tt) + defer ticker.Stop() + for { + select { + case <-ticker.C: + for i := int64(0); i < burst; i++ { + err := c.Unary(c.ctx, cli) + if err != nil { + log.Error("meet erorr when doing etcd request", zap.String("case", c.Name()), zap.Error(err)) + } + } + case <-c.ctx.Done(): + log.Info("Got signal to exit running etcd case") + return + } + } + }(cli) + } +} + +// stop stops the etcd api bench. 
+func (c *etcdController) stop() { + if c.cancel == nil { + return + } + c.cancel() + c.cancel = nil + c.wg.Wait() +} diff --git a/tools/pd-api-bench/config/config.go b/tools/pd-api-bench/config/config.go index 783a7ee463a..675e665ab0a 100644 --- a/tools/pd-api-bench/config/config.go +++ b/tools/pd-api-bench/config/config.go @@ -29,6 +29,7 @@ type Config struct { flagSet *flag.FlagSet configFile string PDAddr string `toml:"pd" json:"pd"` + StatusAddr string `toml:"status" json:"status"` Log log.Config `toml:"log" json:"log"` Logger *zap.Logger @@ -42,13 +43,9 @@ type Config struct { KeyPath string `toml:"key-path" json:"key-path"` // only for init - HTTP map[string]caseConfig `toml:"http" json:"http"` - GRPC map[string]caseConfig `toml:"grpc" json:"grpc"` -} - -type caseConfig struct { - QPS int64 `toml:"qps" json:"qps"` - Burst int64 `toml:"burst" json:"burst"` + HTTP map[string]cases.Config `toml:"http" json:"http"` + GRPC map[string]cases.Config `toml:"grpc" json:"grpc"` + ETCD map[string]cases.Config `toml:"etcd" json:"etcd"` } // NewConfig return a set of settings. @@ -58,6 +55,8 @@ func NewConfig(flagSet *flag.FlagSet) *Config { fs := cfg.flagSet fs.StringVar(&cfg.configFile, "config", "", "config file") fs.StringVar(&cfg.PDAddr, "pd", "http://127.0.0.1:2379", "pd address") + fs.StringVar(&cfg.Log.File.Filename, "log-file", "", "log file path") + fs.StringVar(&cfg.StatusAddr, "status", "127.0.0.1:10081", "status address") fs.Int64Var(&cfg.Client, "client", 1, "client number") fs.StringVar(&cfg.CaPath, "cacert", "", "path of file that contains list of trusted SSL CAs") fs.StringVar(&cfg.CertPath, "cert", "", "path of file that contains X509 certificate in PEM format") @@ -93,43 +92,29 @@ func (c *Config) Parse(arguments []string) error { return errors.Errorf("'%s' is an invalid flag", c.flagSet.Arg(0)) } + return nil +} + +// InitCoordinator set case config from config itself. 
+func (c *Config) InitCoordinator(co *cases.Coordinator) { for name, cfg := range c.HTTP { - if fn, ok := cases.HTTPCaseFnMap[name]; ok { - var cas cases.HTTPCase - if cas, ok = cases.HTTPCaseMap[name]; !ok { - cas = fn() - cases.HTTPCaseMap[name] = cas - } - if cfg.QPS > 0 { - cas.SetQPS(cfg.QPS) - } - if cfg.Burst > 0 { - cas.SetBurst(cfg.Burst) - } - } else { - log.Warn("HTTP case not implemented", zap.String("case", name)) + err := co.SetHTTPCase(name, &cfg) + if err != nil { + log.Error("create HTTP case failed", zap.Error(err)) } } - for name, cfg := range c.GRPC { - if fn, ok := cases.GRPCCaseFnMap[name]; ok { - var cas cases.GRPCCase - if cas, ok = cases.GRPCCaseMap[name]; !ok { - cas = fn() - cases.GRPCCaseMap[name] = cas - } - if cfg.QPS > 0 { - cas.SetQPS(cfg.QPS) - } - if cfg.Burst > 0 { - cas.SetBurst(cfg.Burst) - } - } else { - log.Warn("gRPC case not implemented", zap.String("case", name)) + err := co.SetGRPCCase(name, &cfg) + if err != nil { + log.Error("create gRPC case failed", zap.Error(err)) + } + } + for name, cfg := range c.ETCD { + err := co.SetETCDCase(name, &cfg) + if err != nil { + log.Error("create etcd case failed", zap.Error(err)) } } - - return nil } // Adjust is used to adjust configurations diff --git a/tools/pd-api-bench/config/simconfig.toml b/tools/pd-api-bench/config/simconfig.toml index 9a05001973b..48e5a2595ba 100644 --- a/tools/pd-api-bench/config/simconfig.toml +++ b/tools/pd-api-bench/config/simconfig.toml @@ -9,4 +9,8 @@ pd = "127.0.0.1:2379" burst = 1 [grpc.GetStores] qps = 1000 + burst = 1 +[etcd] + [etcd.Get] + qps = 1 burst = 1 \ No newline at end of file diff --git a/tools/pd-api-bench/main.go b/tools/pd-api-bench/main.go index 56e7ee761b2..dff40555fd6 100644 --- a/tools/pd-api-bench/main.go +++ b/tools/pd-api-bench/main.go @@ -17,6 +17,7 @@ package main import ( "context" "crypto/tls" + "net/http" "os" "os/signal" "strconv" @@ -24,33 +25,62 @@ import ( "syscall" "time" + "github.com/gin-contrib/cors" + 
"github.com/gin-contrib/gzip" + "github.com/gin-contrib/pprof" + "github.com/gin-gonic/gin" "github.com/pingcap/log" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" flag "github.com/spf13/pflag" pd "github.com/tikv/pd/client" pdHttp "github.com/tikv/pd/client/http" "github.com/tikv/pd/client/tlsutil" + "github.com/tikv/pd/pkg/mcs/utils" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/tools/pd-api-bench/cases" "github.com/tikv/pd/tools/pd-api-bench/config" + "go.etcd.io/etcd/clientv3" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" ) var ( - qps = flag.Int64("qps", 1000, "qps") - burst = flag.Int64("burst", 1, "burst") - - httpCases = flag.String("http-cases", "", "http api cases") - gRPCCases = flag.String("grpc-cases", "", "grpc cases") + qps, burst int64 + httpCases, gRPCCases string ) -var base = int64(time.Second) / int64(time.Microsecond) +var ( + pdAPIExecutionHistogram = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: "pd", + Subsystem: "api_bench", + Name: "pd_api_execution_duration_seconds", + Help: "Bucketed histogram of all pd api execution time (s)", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 20), // 1ms ~ 524s + }, []string{"type"}) + + pdAPIRequestCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "pd", + Subsystem: "api_bench", + Name: "pd_api_request_total", + Help: "Counter of the pd http api requests", + }, []string{"type", "result"}) +) func main() { + prometheus.MustRegister(pdAPIExecutionHistogram) + prometheus.MustRegister(pdAPIRequestCounter) + + ctx, cancel := context.WithCancel(context.Background()) flagSet := flag.NewFlagSet("api-bench", flag.ContinueOnError) flagSet.ParseErrorsWhitelist.UnknownFlags = true + flagSet.Int64Var(&qps, "qps", 1, "qps") + flagSet.Int64Var(&burst, "burst", 1, "burst") + flagSet.StringVar(&httpCases, "http-cases", "", "http api cases") + flagSet.StringVar(&gRPCCases, "grpc-cases", "", "grpc 
cases") cfg := config.NewConfig(flagSet) err := cfg.Parse(os.Args[1:]) defer logutil.LogPanic() @@ -68,7 +98,6 @@ func main() { } else { log.Fatal("initialize logger error", zap.Error(err)) } - ctx, cancel := context.WithCancel(context.Background()) sc := make(chan os.Signal, 1) signal.Notify(sc, syscall.SIGHUP, @@ -82,75 +111,6 @@ func main() { cancel() }() - hcaseStr := strings.Split(*httpCases, ",") - for _, str := range hcaseStr { - caseQPS := int64(0) - caseBurst := int64(0) - cStr := "" - - strs := strings.Split(str, "-") - // to get case name - strsa := strings.Split(strs[0], "+") - cStr = strsa[0] - // to get case Burst - if len(strsa) > 1 { - caseBurst, err = strconv.ParseInt(strsa[1], 10, 64) - if err != nil { - log.Error("parse burst failed for case", zap.String("case", cStr), zap.String("config", strsa[1])) - } - } - // to get case qps - if len(strs) > 1 { - strsb := strings.Split(strs[1], "+") - caseQPS, err = strconv.ParseInt(strsb[0], 10, 64) - if err != nil { - if err != nil { - log.Error("parse qps failed for case", zap.String("case", cStr), zap.String("config", strsb[0])) - } - } - // to get case Burst - if len(strsb) > 1 { - caseBurst, err = strconv.ParseInt(strsb[1], 10, 64) - if err != nil { - log.Error("parse burst failed for case", zap.String("case", cStr), zap.String("config", strsb[1])) - } - } - } - if len(cStr) == 0 { - continue - } - if fn, ok := cases.HTTPCaseFnMap[cStr]; ok { - var cas cases.HTTPCase - if cas, ok = cases.HTTPCaseMap[cStr]; !ok { - cas = fn() - cases.HTTPCaseMap[cStr] = cas - } - if caseBurst > 0 { - cas.SetBurst(caseBurst) - } else if *burst > 0 { - cas.SetBurst(*burst) - } - if caseQPS > 0 { - cas.SetQPS(caseQPS) - } else if *qps > 0 { - cas.SetQPS(*qps) - } - } else { - log.Warn("HTTP case not implemented", zap.String("case", cStr)) - } - } - gcaseStr := strings.Split(*gRPCCases, ",") - // todo: see pull 7345 - for _, str := range gcaseStr { - if fn, ok := cases.GRPCCaseFnMap[str]; ok { - if _, ok = 
cases.GRPCCaseMap[str]; !ok { - cases.GRPCCaseMap[str] = fn() - } - } else { - log.Warn("gRPC case not implemented", zap.String("case", str)) - } - } - if cfg.Client == 0 { log.Error("concurrency == 0, exit") return @@ -158,23 +118,43 @@ func main() { pdClis := make([]pd.Client, cfg.Client) for i := int64(0); i < cfg.Client; i++ { pdClis[i] = newPDClient(ctx, cfg) + pdClis[i].UpdateOption(pd.EnableFollowerHandle, true) + } + etcdClis := make([]*clientv3.Client, cfg.Client) + for i := int64(0); i < cfg.Client; i++ { + etcdClis[i] = newEtcdClient(cfg) } httpClis := make([]pdHttp.Client, cfg.Client) for i := int64(0); i < cfg.Client; i++ { sd := pdClis[i].GetServiceDiscovery() - httpClis[i] = pdHttp.NewClientWithServiceDiscovery("tools-api-bench", sd, pdHttp.WithTLSConfig(loadTLSConfig(cfg))) + httpClis[i] = pdHttp.NewClientWithServiceDiscovery("tools-api-bench", sd, pdHttp.WithTLSConfig(loadTLSConfig(cfg)), pdHttp.WithMetrics(pdAPIRequestCounter, pdAPIExecutionHistogram)) } err = cases.InitCluster(ctx, pdClis[0], httpClis[0]) if err != nil { log.Fatal("InitCluster error", zap.Error(err)) } - for _, hcase := range cases.HTTPCaseMap { - handleHTTPCase(ctx, hcase, httpClis) + coordinator := cases.NewCoordinator(ctx, httpClis, pdClis, etcdClis) + + hcaseStr := strings.Split(httpCases, ",") + for _, str := range hcaseStr { + name, cfg := parseCaseNameAndConfig(str) + if len(name) == 0 { + continue + } + coordinator.SetHTTPCase(name, cfg) } - for _, gcase := range cases.GRPCCaseMap { - handleGRPCCase(ctx, gcase, pdClis) + gcaseStr := strings.Split(gRPCCases, ",") + for _, str := range gcaseStr { + name, cfg := parseCaseNameAndConfig(str) + if len(name) == 0 { + continue + } + coordinator.SetGRPCCase(name, cfg) } + cfg.InitCoordinator(coordinator) + + go runHTTPServer(cfg, coordinator) <-ctx.Done() for _, cli := range pdClis { @@ -183,6 +163,9 @@ func main() { for _, cli := range httpClis { cli.Close() } + for _, cli := range etcdClis { + cli.Close() + } log.Info("Exit") 
switch sig { case syscall.SIGTERM: @@ -192,80 +175,209 @@ func main() { } } -func handleGRPCCase(ctx context.Context, gcase cases.GRPCCase, clients []pd.Client) { - qps := gcase.GetQPS() - burst := gcase.GetBurst() - cliNum := int64(len(clients)) - tt := time.Duration(base/qps*burst*cliNum) * time.Microsecond - log.Info("begin to run gRPC case", zap.String("case", gcase.Name()), zap.Int64("qps", qps), zap.Int64("burst", burst), zap.Duration("interval", tt)) - for _, cli := range clients { - go func(cli pd.Client) { - var ticker = time.NewTicker(tt) - defer ticker.Stop() - for { - select { - case <-ticker.C: - for i := int64(0); i < burst; i++ { - err := gcase.Unary(ctx, cli) - if err != nil { - log.Error("meet erorr when doing gRPC request", zap.String("case", gcase.Name()), zap.Error(err)) - } - } - case <-ctx.Done(): - log.Info("Got signal to exit handleGetRegion") - return - } +func exit(code int) { + os.Exit(code) +} + +func parseCaseNameAndConfig(str string) (string, *cases.Config) { + var err error + cfg := &cases.Config{} + name := "" + strs := strings.Split(str, "-") + // to get case name + strsa := strings.Split(strs[0], "+") + name = strsa[0] + // to get case Burst + if len(strsa) > 1 { + cfg.Burst, err = strconv.ParseInt(strsa[1], 10, 64) + if err != nil { + log.Error("parse burst failed for case", zap.String("case", name), zap.String("config", strsa[1])) + } + } + // to get case qps + if len(strs) > 1 { + strsb := strings.Split(strs[1], "+") + cfg.QPS, err = strconv.ParseInt(strsb[0], 10, 64) + if err != nil { + if err != nil { + log.Error("parse qps failed for case", zap.String("case", name), zap.String("config", strsb[0])) } - }(cli) + } + // to get case Burst + if len(strsb) > 1 { + cfg.Burst, err = strconv.ParseInt(strsb[1], 10, 64) + if err != nil { + log.Error("parse burst failed for case", zap.String("case", name), zap.String("config", strsb[1])) + } + } + } + if cfg.QPS == 0 && qps > 0 { + cfg.QPS = qps + } + if cfg.Burst == 0 && burst > 0 { + 
cfg.Burst = burst } + return name, cfg } -func handleHTTPCase(ctx context.Context, hcase cases.HTTPCase, httpClis []pdHttp.Client) { - qps := hcase.GetQPS() - burst := hcase.GetBurst() - cliNum := int64(len(httpClis)) - tt := time.Duration(base/qps*burst*cliNum) * time.Microsecond - log.Info("begin to run http case", zap.String("case", hcase.Name()), zap.Int64("qps", qps), zap.Int64("burst", burst), zap.Duration("interval", tt)) - for _, hCli := range httpClis { - go func(hCli pdHttp.Client) { - var ticker = time.NewTicker(tt) - defer ticker.Stop() - for { - select { - case <-ticker.C: - for i := int64(0); i < burst; i++ { - err := hcase.Do(ctx, hCli) - if err != nil { - log.Error("meet erorr when doing HTTP request", zap.String("case", hcase.Name()), zap.Error(err)) - } - } - case <-ctx.Done(): - log.Info("Got signal to exit handleScanRegions") - return - } +func runHTTPServer(cfg *config.Config, co *cases.Coordinator) { + gin.SetMode(gin.ReleaseMode) + engine := gin.New() + engine.Use(gin.Recovery()) + engine.Use(cors.Default()) + engine.Use(gzip.Gzip(gzip.DefaultCompression)) + engine.GET("metrics", utils.PromHandler()) + // profile API + pprof.Register(engine) + + getCfg := func(c *gin.Context) *cases.Config { + var err error + cfg := &cases.Config{} + qpsStr := c.Query("qps") + if len(qpsStr) > 0 { + cfg.QPS, err = strconv.ParseInt(qpsStr, 10, 64) + if err != nil { + c.String(http.StatusBadRequest, err.Error()) + } + } + burstStr := c.Query("burst") + if len(burstStr) > 0 { + cfg.Burst, err = strconv.ParseInt(burstStr, 10, 64) + if err != nil { + c.String(http.StatusBadRequest, err.Error()) } - }(hCli) + } + return cfg } -} -func exit(code int) { - os.Exit(code) + engine.POST("config/http/all", func(c *gin.Context) { + var input map[string]cases.Config + if err := c.ShouldBindJSON(&input); err != nil { + c.String(http.StatusBadRequest, err.Error()) + return + } + for name, cfg := range input { + co.SetHTTPCase(name, &cfg) + } + c.String(http.StatusOK, "") + }) 
+ engine.POST("config/http/:name", func(c *gin.Context) { + name := c.Param("name") + cfg := getCfg(c) + co.SetHTTPCase(name, cfg) + c.String(http.StatusOK, "") + }) + engine.POST("config/grpc/all", func(c *gin.Context) { + var input map[string]cases.Config + if err := c.ShouldBindJSON(&input); err != nil { + c.String(http.StatusBadRequest, err.Error()) + return + } + for name, cfg := range input { + co.SetGRPCCase(name, &cfg) + } + c.String(http.StatusOK, "") + }) + engine.POST("config/grpc/:name", func(c *gin.Context) { + name := c.Param("name") + cfg := getCfg(c) + co.SetGRPCCase(name, cfg) + c.String(http.StatusOK, "") + }) + engine.POST("config/etcd/all", func(c *gin.Context) { + var input map[string]cases.Config + if err := c.ShouldBindJSON(&input); err != nil { + c.String(http.StatusBadRequest, err.Error()) + return + } + for name, cfg := range input { + co.SetETCDCase(name, &cfg) + } + c.String(http.StatusOK, "") + }) + engine.POST("config/etcd/:name", func(c *gin.Context) { + name := c.Param("name") + cfg := getCfg(c) + co.SetETCDCase(name, cfg) + c.String(http.StatusOK, "") + }) + + engine.GET("config/http/all", func(c *gin.Context) { + all := co.GetAllHTTPCases() + c.IndentedJSON(http.StatusOK, all) + }) + engine.GET("config/http/:name", func(c *gin.Context) { + name := c.Param("name") + cfg, err := co.GetHTTPCase(name) + if err != nil { + c.String(http.StatusBadRequest, err.Error()) + return + } + c.IndentedJSON(http.StatusOK, cfg) + }) + engine.GET("config/grpc/all", func(c *gin.Context) { + all := co.GetAllGRPCCases() + c.IndentedJSON(http.StatusOK, all) + }) + engine.GET("config/grpc/:name", func(c *gin.Context) { + name := c.Param("name") + cfg, err := co.GetGRPCCase(name) + if err != nil { + c.String(http.StatusBadRequest, err.Error()) + return + } + c.IndentedJSON(http.StatusOK, cfg) + }) + engine.GET("config/etcd/all", func(c *gin.Context) { + all := co.GetAllETCDCases() + c.IndentedJSON(http.StatusOK, all) + }) + engine.GET("config/etcd/:name", 
func(c *gin.Context) { + name := c.Param("name") + cfg, err := co.GetETCDCase(name) + if err != nil { + c.String(http.StatusBadRequest, err.Error()) + return + } + c.IndentedJSON(http.StatusOK, cfg) + }) + // nolint + engine.Run(cfg.StatusAddr) } -func trimHTTPPrefix(str string) string { - str = strings.TrimPrefix(str, "http://") - str = strings.TrimPrefix(str, "https://") - return str +const ( + keepaliveTime = 10 * time.Second + keepaliveTimeout = 3 * time.Second +) + +func newEtcdClient(cfg *config.Config) *clientv3.Client { + lgc := zap.NewProductionConfig() + lgc.Encoding = log.ZapEncodingName + tlsCfg, err := tlsutil.TLSConfig{ + CAPath: cfg.CaPath, + CertPath: cfg.CertPath, + KeyPath: cfg.KeyPath, + }.ToTLSConfig() + if err != nil { + log.Fatal("fail to create etcd client", zap.Error(err)) + return nil + } + clientConfig := clientv3.Config{ + Endpoints: []string{cfg.PDAddr}, + DialTimeout: keepaliveTimeout, + TLS: tlsCfg, + LogConfig: &lgc, + } + client, err := clientv3.New(clientConfig) + if err != nil { + log.Fatal("fail to create pd client", zap.Error(err)) + } + return client } // newPDClient returns a pd client. 
func newPDClient(ctx context.Context, cfg *config.Config) pd.Client { - const ( - keepaliveTime = 10 * time.Second - keepaliveTimeout = 3 * time.Second - ) - - addrs := []string{trimHTTPPrefix(cfg.PDAddr)} + addrs := []string{cfg.PDAddr} pdCli, err := pd.NewClientWithContext(ctx, addrs, pd.SecurityOption{ CAPath: cfg.CaPath, CertPath: cfg.CertPath, diff --git a/tools/pd-ctl/main.go b/tools/pd-ctl/main.go index 1478a13fcda..0d052d95680 100644 --- a/tools/pd-ctl/main.go +++ b/tools/pd-ctl/main.go @@ -20,8 +20,10 @@ import ( "os/signal" "syscall" + "github.com/pingcap/log" "github.com/tikv/pd/tools/pd-ctl/pdctl" "github.com/tikv/pd/tools/pd-ctl/pdctl/command" + "go.uber.org/zap/zapcore" ) func main() { @@ -51,6 +53,7 @@ func main() { } }() + log.SetLevel(zapcore.FatalLevel) var inputs []string stat, _ := os.Stdin.Stat() if (stat.Mode() & os.ModeCharDevice) == 0 { diff --git a/tools/pd-ctl/pdctl/command/global.go b/tools/pd-ctl/pdctl/command/global.go index 850f5d59f69..806ad4ecc53 100644 --- a/tools/pd-ctl/pdctl/command/global.go +++ b/tools/pd-ctl/pdctl/command/global.go @@ -43,8 +43,7 @@ func SetNewPDClient(addrs []string, opts ...pd.ClientOption) { if PDCli != nil { PDCli.Close() } - withOpts := append(opts, pd.WithLoggerRedirection("fatal", "")) - PDCli = pd.NewClient(pdControlCallerID, addrs, withOpts...) + PDCli = pd.NewClient(pdControlCallerID, addrs, opts...) 
} // TODO: replace dialClient with PDCli @@ -69,7 +68,7 @@ func InitHTTPSClient(pdAddrs, caPath, certPath, keyPath string) error { &http.Transport{TLSClientConfig: tlsConfig}, pdControlCallerID), } - SetNewPDClient(strings.Split(pdAddrs, ","), pd.WithTLSConfig(tlsConfig)) + SetNewPDClient(strings.Split(pdAddrs, ","), pd.WithTLSConfig(tlsConfig.Clone())) return nil } diff --git a/tools/pd-ctl/tests/cert_opt.sh b/tools/pd-ctl/tests/cert_opt.sh new file mode 100755 index 00000000000..2f1a410f56f --- /dev/null +++ b/tools/pd-ctl/tests/cert_opt.sh @@ -0,0 +1,55 @@ +#!/bin/bash +cert_dir="$2" + +function generate_certs() { + if [[ ! -z "$cert_dir" ]]; then + cd "$cert_dir" || exit 255 # Change to the specified directory + fi + + if ! [[ "$0" =~ "cert_opt.sh" ]]; then + echo "must be run from 'cert'" + exit 255 + fi + + if ! which openssl; then + echo "openssl is not installed" + exit 255 + fi + + # Generate CA private key and self-signed certificate + openssl genpkey -algorithm RSA -out ca-key.pem + openssl req -new -x509 -key ca-key.pem -out ca.pem -days 1 -subj "/CN=ca" + # pd-server + openssl genpkey -algorithm RSA -out pd-server-key.pem + openssl req -new -key pd-server-key.pem -out pd-server.csr -subj "/CN=pd-server" + + # Add IP address as a SAN + echo "subjectAltName = IP:127.0.0.1" > extfile.cnf + openssl x509 -req -in pd-server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out pd-server.pem -days 1 -extfile extfile.cnf + + # Clean up the temporary extension file + rm extfile.cnf + + # client + openssl genpkey -algorithm RSA -out client-key.pem + openssl req -new -key client-key.pem -out client.csr -subj "/CN=client" + openssl x509 -req -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out client.pem -days 1 +} + +function cleanup_certs() { + if [[ ! 
-z "$cert_dir" ]]; then + cd "$cert_dir" || exit 255 # Change to the specified directory + fi + + rm -f ca.pem ca-key.pem ca.srl + rm -f pd-server.pem pd-server-key.pem pd-server.csr + rm -f client.pem client-key.pem client.csr +} + +if [[ "$1" == "generate" ]]; then + generate_certs +elif [[ "$1" == "cleanup" ]]; then + cleanup_certs +else + echo "Usage: $0 [generate|cleanup] " +fi \ No newline at end of file diff --git a/tools/pd-ctl/tests/config/config_test.go b/tools/pd-ctl/tests/config/config_test.go index d5d9eba6c9e..4a585851227 100644 --- a/tools/pd-ctl/tests/config/config_test.go +++ b/tools/pd-ctl/tests/config/config_test.go @@ -1111,6 +1111,36 @@ func (suite *configTestSuite) checkPDServerConfig(cluster *pdTests.TestCluster) re.Equal(int(3), conf.FlowRoundByDigit) } +func (suite *configTestSuite) TestMicroServiceConfig() { + suite.env.RunTestInTwoModes(suite.checkMicroServiceConfig) +} + +func (suite *configTestSuite) checkMicroServiceConfig(cluster *pdTests.TestCluster) { + re := suite.Require() + leaderServer := cluster.GetLeaderServer() + pdAddr := leaderServer.GetAddr() + cmd := ctl.GetRootCmd() + + store := &metapb.Store{ + Id: 1, + State: metapb.StoreState_Up, + LastHeartbeat: time.Now().UnixNano(), + } + pdTests.MustPutStore(re, cluster, store) + svr := leaderServer.GetServer() + output, err := tests.ExecuteCommand(cmd, "-u", pdAddr, "config", "show", "all") + re.NoError(err) + cfg := config.Config{} + re.NoError(json.Unmarshal(output, &cfg)) + re.True(svr.GetMicroServiceConfig().EnableSchedulingFallback) + re.True(cfg.MicroService.EnableSchedulingFallback) + // config set enable-scheduling-fallback + args := []string{"-u", pdAddr, "config", "set", "enable-scheduling-fallback", "false"} + _, err = tests.ExecuteCommand(cmd, args...) 
+ re.NoError(err) + re.False(svr.GetMicroServiceConfig().EnableSchedulingFallback) +} + func assertBundles(re *require.Assertions, a, b []placement.GroupBundle) { re.Len(b, len(a)) for i := 0; i < len(a); i++ { diff --git a/tools/pd-ctl/tests/member/member_test.go b/tools/pd-ctl/tests/member/member_test.go index d376948c2ed..d6888858a3b 100644 --- a/tools/pd-ctl/tests/member/member_test.go +++ b/tools/pd-ctl/tests/member/member_test.go @@ -87,14 +87,14 @@ func TestMember(t *testing.T) { // member delete name err = svr.Destroy() re.NoError(err) - members, err := etcdutil.ListEtcdMembers(client) + members, err := etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) re.Len(members.Members, 3) args = []string{"-u", pdAddr, "member", "delete", "name", name} _, err = tests.ExecuteCommand(cmd, args...) re.NoError(err) testutil.Eventually(re, func() bool { - members, err = etcdutil.ListEtcdMembers(client) + members, err = etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) return len(members.Members) == 2 }) @@ -104,7 +104,7 @@ func TestMember(t *testing.T) { _, err = tests.ExecuteCommand(cmd, args...) 
re.NoError(err) testutil.Eventually(re, func() bool { - members, err = etcdutil.ListEtcdMembers(client) + members, err = etcdutil.ListEtcdMembers(ctx, client) re.NoError(err) return len(members.Members) == 2 }) diff --git a/tools/pd-ctl/tests/operator/operator_test.go b/tools/pd-ctl/tests/operator/operator_test.go index bf361f962ce..335e6635948 100644 --- a/tools/pd-ctl/tests/operator/operator_test.go +++ b/tools/pd-ctl/tests/operator/operator_test.go @@ -226,7 +226,7 @@ func (suite *operatorTestSuite) checkOperator(cluster *pdTests.TestCluster) { _, err = tests.ExecuteCommand(cmd, "config", "set", "enable-placement-rules", "true") re.NoError(err) if sche := cluster.GetSchedulingPrimaryServer(); sche != nil { - // wait for the scheduler server to update the config + // wait for the scheduling server to update the config testutil.Eventually(re, func() bool { return sche.GetCluster().GetCheckerConfig().IsPlacementRulesEnabled() }) diff --git a/tools/pd-ctl/tests/store/store_test.go b/tools/pd-ctl/tests/store/store_test.go index 41d02a98aa3..c8103414e9c 100644 --- a/tools/pd-ctl/tests/store/store_test.go +++ b/tools/pd-ctl/tests/store/store_test.go @@ -17,6 +17,10 @@ package store_test import ( "context" "encoding/json" + "os" + "os/exec" + "path/filepath" + "strings" "testing" "time" @@ -25,10 +29,13 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/statistics/utils" + "github.com/tikv/pd/pkg/utils/grpcutil" "github.com/tikv/pd/server/api" + "github.com/tikv/pd/server/config" pdTests "github.com/tikv/pd/tests" ctl "github.com/tikv/pd/tools/pd-ctl/pdctl" "github.com/tikv/pd/tools/pd-ctl/tests" + "go.etcd.io/etcd/pkg/transport" ) func TestStore(t *testing.T) { @@ -544,3 +551,95 @@ func TestTombstoneStore(t *testing.T) { re.Contains(message, "2") re.Contains(message, "3") } + +func TestStoreTLS(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + 
certPath := "../cert" + certScript := "../cert_opt.sh" + // generate certs + if err := os.Mkdir(certPath, 0755); err != nil { + t.Fatal(err) + } + if err := exec.Command(certScript, "generate", certPath).Run(); err != nil { + t.Fatal(err) + } + defer func() { + if err := exec.Command(certScript, "cleanup", certPath).Run(); err != nil { + t.Fatal(err) + } + if err := os.RemoveAll(certPath); err != nil { + t.Fatal(err) + } + }() + + tlsInfo := transport.TLSInfo{ + KeyFile: filepath.Join(certPath, "pd-server-key.pem"), + CertFile: filepath.Join(certPath, "pd-server.pem"), + TrustedCAFile: filepath.Join(certPath, "ca.pem"), + } + cluster, err := pdTests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { + conf.Security.TLSConfig = grpcutil.TLSConfig{ + KeyPath: tlsInfo.KeyFile, + CertPath: tlsInfo.CertFile, + CAPath: tlsInfo.TrustedCAFile, + } + conf.AdvertiseClientUrls = strings.ReplaceAll(conf.AdvertiseClientUrls, "http", "https") + conf.ClientUrls = strings.ReplaceAll(conf.ClientUrls, "http", "https") + conf.AdvertisePeerUrls = strings.ReplaceAll(conf.AdvertisePeerUrls, "http", "https") + conf.PeerUrls = strings.ReplaceAll(conf.PeerUrls, "http", "https") + conf.InitialCluster = strings.ReplaceAll(conf.InitialCluster, "http", "https") + }) + re.NoError(err) + err = cluster.RunInitialServers() + re.NoError(err) + cluster.WaitLeader() + cmd := ctl.GetRootCmd() + + stores := []*api.StoreInfo{ + { + Store: &api.MetaStore{ + Store: &metapb.Store{ + Id: 1, + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + }, + StateName: metapb.StoreState_Up.String(), + }, + }, + { + Store: &api.MetaStore{ + Store: &metapb.Store{ + Id: 2, + State: metapb.StoreState_Up, + NodeState: metapb.NodeState_Serving, + LastHeartbeat: time.Now().UnixNano(), + }, + StateName: metapb.StoreState_Up.String(), + }, + }, + } + + leaderServer := cluster.GetLeaderServer() + re.NoError(leaderServer.BootstrapCluster()) + + for _, 
store := range stores { + pdTests.MustPutStore(re, cluster, store.Store.Store) + } + defer cluster.Destroy() + + pdAddr := cluster.GetConfig().GetClientURL() + pdAddr = strings.ReplaceAll(pdAddr, "http", "https") + // store command + args := []string{"-u", pdAddr, "store", + "--cacert=../cert/ca.pem", + "--cert=../cert/client.pem", + "--key=../cert/client-key.pem"} + output, err := tests.ExecuteCommand(cmd, args...) + re.NoError(err) + storesInfo := new(api.StoresInfo) + re.NoError(json.Unmarshal(output, &storesInfo)) + tests.CheckStoresInfo(re, storesInfo.Stores, stores) +} diff --git a/tools/pd-heartbeat-bench/config-template.toml b/tools/pd-heartbeat-bench/config-template.toml index d2a0fa844fe..4964535a772 100644 --- a/tools/pd-heartbeat-bench/config-template.toml +++ b/tools/pd-heartbeat-bench/config-template.toml @@ -7,9 +7,9 @@ key-length = 56 replica = 3 leader-update-ratio = 0.06 -epoch-update-ratio = 0.04 -space-update-ratio = 0.15 -flow-update-ratio = 0.35 +epoch-update-ratio = 0.0 +space-update-ratio = 0.0 +flow-update-ratio = 0.0 no-update-ratio = 0.0 sample = false diff --git a/tools/pd-heartbeat-bench/config/config.go b/tools/pd-heartbeat-bench/config/config.go index be669c00666..90254014d82 100644 --- a/tools/pd-heartbeat-bench/config/config.go +++ b/tools/pd-heartbeat-bench/config/config.go @@ -1,7 +1,6 @@ package config import ( - "math" "sync/atomic" "github.com/BurntSushi/toml" @@ -21,7 +20,7 @@ const ( defaultEpochUpdateRatio = 0.04 defaultSpaceUpdateRatio = 0.15 defaultFlowUpdateRatio = 0.35 - defaultNoUpdateRatio = 0 + defaultReportRatio = 1 defaultRound = 0 defaultSample = false @@ -39,6 +38,8 @@ type Config struct { Logger *zap.Logger LogProps *log.ZapProperties + Security configutil.SecurityConfig `toml:"security" json:"security"` + StoreCount int `toml:"store-count" json:"store-count"` RegionCount int `toml:"region-count" json:"region-count"` KeyLength int `toml:"key-length" json:"key-length"` @@ -47,7 +48,7 @@ type Config struct { 
EpochUpdateRatio float64 `toml:"epoch-update-ratio" json:"epoch-update-ratio"` SpaceUpdateRatio float64 `toml:"space-update-ratio" json:"space-update-ratio"` FlowUpdateRatio float64 `toml:"flow-update-ratio" json:"flow-update-ratio"` - NoUpdateRatio float64 `toml:"no-update-ratio" json:"no-update-ratio"` + ReportRatio float64 `toml:"report-ratio" json:"report-ratio"` Sample bool `toml:"sample" json:"sample"` Round int `toml:"round" json:"round"` } @@ -59,8 +60,12 @@ func NewConfig() *Config { fs := cfg.flagSet fs.ParseErrorsWhitelist.UnknownFlags = true fs.StringVar(&cfg.configFile, "config", "", "config file") - fs.StringVar(&cfg.PDAddr, "pd", "127.0.0.1:2379", "pd address") + fs.StringVar(&cfg.PDAddr, "pd-endpoints", "127.0.0.1:2379", "pd address") + fs.StringVar(&cfg.Log.File.Filename, "log-file", "", "log file path") fs.StringVar(&cfg.StatusAddr, "status-addr", "127.0.0.1:20180", "status address") + fs.StringVar(&cfg.Security.CAPath, "cacert", "", "path of file that contains list of trusted TLS CAs") + fs.StringVar(&cfg.Security.CertPath, "cert", "", "path of file that contains X509 certificate in PEM format") + fs.StringVar(&cfg.Security.KeyPath, "key", "", "path of file that contains X509 key in PEM format") return cfg } @@ -132,8 +137,8 @@ func (c *Config) Adjust(meta *toml.MetaData) { if !meta.IsDefined("flow-update-ratio") { configutil.AdjustFloat64(&c.FlowUpdateRatio, defaultFlowUpdateRatio) } - if !meta.IsDefined("no-update-ratio") { - configutil.AdjustFloat64(&c.NoUpdateRatio, defaultNoUpdateRatio) + if !meta.IsDefined("report-ratio") { + configutil.AdjustFloat64(&c.ReportRatio, defaultReportRatio) } if !meta.IsDefined("sample") { c.Sample = defaultSample @@ -142,24 +147,20 @@ func (c *Config) Adjust(meta *toml.MetaData) { // Validate is used to validate configurations func (c *Config) Validate() error { - if c.LeaderUpdateRatio < 0 || c.LeaderUpdateRatio > 1 { - return errors.Errorf("leader-update-ratio must be in [0, 1]") - } - if c.EpochUpdateRatio < 
0 || c.EpochUpdateRatio > 1 { - return errors.Errorf("epoch-update-ratio must be in [0, 1]") + if c.ReportRatio < 0 || c.ReportRatio > 1 { + return errors.Errorf("report-ratio must be in [0, 1]") } - if c.SpaceUpdateRatio < 0 || c.SpaceUpdateRatio > 1 { - return errors.Errorf("space-update-ratio must be in [0, 1]") + if c.LeaderUpdateRatio > c.ReportRatio || c.LeaderUpdateRatio < 0 { + return errors.Errorf("leader-update-ratio can not be negative or larger than report-ratio") } - if c.FlowUpdateRatio < 0 || c.FlowUpdateRatio > 1 { - return errors.Errorf("flow-update-ratio must be in [0, 1]") + if c.EpochUpdateRatio > c.ReportRatio || c.EpochUpdateRatio < 0 { + return errors.Errorf("epoch-update-ratio can not be negative or larger than report-ratio") } - if c.NoUpdateRatio < 0 || c.NoUpdateRatio > 1 { - return errors.Errorf("no-update-ratio must be in [0, 1]") + if c.SpaceUpdateRatio > c.ReportRatio || c.SpaceUpdateRatio < 0 { + return errors.Errorf("space-update-ratio can not be negative or larger than report-ratio") } - max := math.Max(c.LeaderUpdateRatio, math.Max(c.EpochUpdateRatio, math.Max(c.SpaceUpdateRatio, c.FlowUpdateRatio))) - if max+c.NoUpdateRatio > 1 { - return errors.Errorf("sum of update-ratio must be in [0, 1]") + if c.FlowUpdateRatio > c.ReportRatio || c.FlowUpdateRatio < 0 { + return errors.Errorf("flow-update-ratio can not be negative or larger than report-ratio") } return nil } @@ -173,11 +174,12 @@ func (c *Config) Clone() *Config { // Options is the option of the heartbeat-bench. type Options struct { + ReportRatio atomic.Value + LeaderUpdateRatio atomic.Value EpochUpdateRatio atomic.Value SpaceUpdateRatio atomic.Value FlowUpdateRatio atomic.Value - NoUpdateRatio atomic.Value } // NewOptions creates a new option. 
@@ -187,7 +189,7 @@ func NewOptions(cfg *Config) *Options { o.EpochUpdateRatio.Store(cfg.EpochUpdateRatio) o.SpaceUpdateRatio.Store(cfg.SpaceUpdateRatio) o.FlowUpdateRatio.Store(cfg.FlowUpdateRatio) - o.NoUpdateRatio.Store(cfg.NoUpdateRatio) + o.ReportRatio.Store(cfg.ReportRatio) return o } @@ -211,9 +213,9 @@ func (o *Options) GetFlowUpdateRatio() float64 { return o.FlowUpdateRatio.Load().(float64) } -// GetNoUpdateRatio returns the no update ratio. -func (o *Options) GetNoUpdateRatio() float64 { - return o.NoUpdateRatio.Load().(float64) +// GetReportRatio returns the report ratio. +func (o *Options) GetReportRatio() float64 { + return o.ReportRatio.Load().(float64) } // SetOptions sets the option. @@ -222,5 +224,5 @@ func (o *Options) SetOptions(cfg *Config) { o.EpochUpdateRatio.Store(cfg.EpochUpdateRatio) o.SpaceUpdateRatio.Store(cfg.SpaceUpdateRatio) o.FlowUpdateRatio.Store(cfg.FlowUpdateRatio) - o.NoUpdateRatio.Store(cfg.NoUpdateRatio) + o.ReportRatio.Store(cfg.ReportRatio) } diff --git a/tools/pd-heartbeat-bench/main.go b/tools/pd-heartbeat-bench/main.go index 0e1af0de9ca..52591b05770 100644 --- a/tools/pd-heartbeat-bench/main.go +++ b/tools/pd-heartbeat-bench/main.go @@ -22,7 +22,6 @@ import ( "net/http" "os" "os/signal" - "strings" "sync" "sync/atomic" "syscall" @@ -38,12 +37,12 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" "github.com/spf13/pflag" + "github.com/tikv/pd/client/grpcutil" "github.com/tikv/pd/pkg/mcs/utils" "github.com/tikv/pd/pkg/utils/logutil" "github.com/tikv/pd/tools/pd-heartbeat-bench/config" "go.etcd.io/etcd/pkg/report" "go.uber.org/zap" - "google.golang.org/grpc" ) const ( @@ -57,19 +56,16 @@ const ( var clusterID uint64 -func trimHTTPPrefix(str string) string { - str = strings.TrimPrefix(str, "http://") - str = strings.TrimPrefix(str, "https://") - return str -} - -func newClient(cfg *config.Config) pdpb.PDClient { - addr := trimHTTPPrefix(cfg.PDAddr) - cc, err := grpc.Dial(addr, grpc.WithInsecure()) +func 
newClient(ctx context.Context, cfg *config.Config) (pdpb.PDClient, error) { + tlsConfig, err := cfg.Security.ToTLSConfig() + if err != nil { + return nil, err + } + cc, err := grpcutil.GetClientConn(ctx, cfg.PDAddr, tlsConfig) if err != nil { - log.Fatal("failed to create gRPC connection", zap.Error(err)) + return nil, err } - return pdpb.NewPDClient(cc) + return pdpb.NewPDClient(cc), nil } func initClusterID(ctx context.Context, cli pdpb.PDClient) { @@ -255,31 +251,32 @@ func (rs *Regions) init(cfg *config.Config, options *config.Options) []int { func (rs *Regions) update(cfg *config.Config, options *config.Options, indexes []int) { rs.updateRound += 1 - rs.updateLeader = pick(indexes, cfg, options.GetLeaderUpdateRatio()) - rs.updateEpoch = pick(indexes, cfg, options.GetEpochUpdateRatio()) - rs.updateSpace = pick(indexes, cfg, options.GetSpaceUpdateRatio()) - rs.updateFlow = pick(indexes, cfg, options.GetFlowUpdateRatio()) - updatedRegionsMap := make(map[int]*pdpb.RegionHeartbeatRequest) - var awakenRegions []*pdpb.RegionHeartbeatRequest + reportRegions := pick(indexes, cfg.RegionCount, options.GetReportRatio()) + reportCount := len(reportRegions) + rs.updateLeader = pick(reportRegions, reportCount, options.GetLeaderUpdateRatio()) + rs.updateEpoch = pick(reportRegions, reportCount, options.GetEpochUpdateRatio()) + rs.updateSpace = pick(reportRegions, reportCount, options.GetSpaceUpdateRatio()) + rs.updateFlow = pick(reportRegions, reportCount, options.GetFlowUpdateRatio()) + var ( + updatedStatisticsMap = make(map[int]*pdpb.RegionHeartbeatRequest) + awakenRegions []*pdpb.RegionHeartbeatRequest + ) // update leader for _, i := range rs.updateLeader { region := rs.regions[i] region.Leader = region.Region.Peers[rs.updateRound%cfg.Replica] - updatedRegionsMap[i] = region } // update epoch for _, i := range rs.updateEpoch { region := rs.regions[i] region.Region.RegionEpoch.Version += 1 - updatedRegionsMap[i] = region } // update space for _, i := range rs.updateSpace { 
region := rs.regions[i] region.ApproximateSize = uint64(bytesUnit * rand.Float64()) region.ApproximateKeys = uint64(keysUint * rand.Float64()) - updatedRegionsMap[i] = region } // update flow for _, i := range rs.updateFlow { @@ -292,25 +289,34 @@ func (rs *Regions) update(cfg *config.Config, options *config.Options, indexes [ Get: uint64(queryUnit * rand.Float64()), Put: uint64(queryUnit * rand.Float64()), } - updatedRegionsMap[i] = region + updatedStatisticsMap[i] = region } // update interval for _, region := range rs.regions { region.Interval.StartTimestamp = region.Interval.EndTimestamp region.Interval.EndTimestamp = region.Interval.StartTimestamp + regionReportInterval } - for _, region := range updatedRegionsMap { + for _, i := range reportRegions { + region := rs.regions[i] + // reset the statistics of the region which is not updated + if _, exist := updatedStatisticsMap[i]; !exist { + region.BytesWritten = 0 + region.BytesRead = 0 + region.KeysWritten = 0 + region.KeysRead = 0 + region.QueryStats = &pdpb.QueryStats{} + } awakenRegions = append(awakenRegions, region) } - noUpdatedRegions := pickNoUpdatedRegions(indexes, cfg, options.GetNoUpdateRatio(), updatedRegionsMap) - for _, i := range noUpdatedRegions { - awakenRegions = append(awakenRegions, rs.regions[i]) - } + rs.awakenRegions.Store(awakenRegions) } func createHeartbeatStream(ctx context.Context, cfg *config.Config) pdpb.PD_RegionHeartbeatClient { - cli := newClient(cfg) + cli, err := newClient(ctx, cfg) + if err != nil { + log.Fatal("create client error", zap.Error(err)) + } stream, err := cli.RegionHeartbeat(ctx) if err != nil { log.Fatal("create stream error", zap.Error(err)) @@ -359,7 +365,7 @@ func (rs *Regions) handleRegionHeartbeat(wg *sync.WaitGroup, stream pdpb.PD_Regi return } } - log.Info("store finish one round region heartbeat", zap.Uint64("store-id", storeID), zap.Duration("cost-time", time.Since(start))) + log.Info("store finish one round region heartbeat", zap.Uint64("store-id", 
storeID), zap.Duration("cost-time", time.Since(start)), zap.Int("reported-region-count", len(regions))) } // Stores contains store stats with lock. @@ -395,7 +401,14 @@ func (s *Stores) update(rs *Regions) { }, } } - for _, region := range rs.regions { + var toUpdate []*pdpb.RegionHeartbeatRequest + updatedRegions := rs.awakenRegions.Load() + if updatedRegions == nil { + toUpdate = rs.regions + } else { + toUpdate = updatedRegions.([]*pdpb.RegionHeartbeatRequest) + } + for _, region := range toUpdate { for _, peer := range region.Region.Peers { store := stats[peer.StoreId] store.UsedSize += region.ApproximateSize @@ -425,28 +438,11 @@ func (s *Stores) update(rs *Regions) { } } -func pick(slice []int, cfg *config.Config, ratio float64) []int { - rand.Shuffle(cfg.RegionCount, func(i, j int) { +func pick(slice []int, total int, ratio float64) []int { + rand.Shuffle(total, func(i, j int) { slice[i], slice[j] = slice[j], slice[i] }) - return append(slice[:0:0], slice[0:int(float64(cfg.RegionCount)*ratio)]...) -} - -func pickNoUpdatedRegions(slice []int, cfg *config.Config, ratio float64, updatedMap map[int]*pdpb.RegionHeartbeatRequest) []int { - if ratio == 0 { - return nil - } - rand.Shuffle(cfg.RegionCount, func(i, j int) { - slice[i], slice[j] = slice[j], slice[i] - }) - NoUpdatedRegionsNum := int(float64(cfg.RegionCount) * ratio) - res := make([]int, 0, NoUpdatedRegionsNum) - for i := 0; len(res) < NoUpdatedRegionsNum; i++ { - if _, ok := updatedMap[slice[i]]; !ok { - res = append(res, slice[i]) - } - } - return res + return append(slice[:0:0], slice[0:int(float64(total)*ratio)]...) 
} func main() { @@ -487,7 +483,10 @@ func main() { sig = <-sc cancel() }() - cli := newClient(cfg) + cli, err := newClient(ctx, cfg) + if err != nil { + log.Fatal("create client error", zap.Error(err)) + } initClusterID(ctx, cli) go runHTTPServer(cfg, options) regions := new(Regions) @@ -604,7 +603,7 @@ func runHTTPServer(cfg *config.Config, options *config.Options) { newCfg.LeaderUpdateRatio = options.GetLeaderUpdateRatio() newCfg.EpochUpdateRatio = options.GetEpochUpdateRatio() newCfg.SpaceUpdateRatio = options.GetSpaceUpdateRatio() - newCfg.NoUpdateRatio = options.GetNoUpdateRatio() + newCfg.ReportRatio = options.GetReportRatio() if err := c.BindJSON(&newCfg); err != nil { c.String(http.StatusBadRequest, err.Error()) return @@ -622,7 +621,7 @@ func runHTTPServer(cfg *config.Config, options *config.Options) { output.LeaderUpdateRatio = options.GetLeaderUpdateRatio() output.EpochUpdateRatio = options.GetEpochUpdateRatio() output.SpaceUpdateRatio = options.GetSpaceUpdateRatio() - output.NoUpdateRatio = options.GetNoUpdateRatio() + output.ReportRatio = options.GetReportRatio() c.IndentedJSON(http.StatusOK, output) })